//===- Vectorization.cpp - Implementation of linalg Vectorization ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Vectorization transformations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>

using namespace mlir;
using namespace mlir::linalg;

using llvm::dbgs;

#define DEBUG_TYPE "linalg-vectorization"

/// Return the unique instance of OpType in `block` if it is indeed unique.
/// Return null if zero or more than one instance exists.
template <typename OpType>
static OpType getSingleOpOfType(Block &block) {
  OpType res;
  block.walk([&](OpType op) {
    if (res) {
      res = nullptr;
      return WalkResult::interrupt();
    }
    res = op;
    return WalkResult::advance();
  });
  return res;
}

/// Given an indexing `map` coming from a LinalgOp indexing, restricted to a
/// projectedPermutation, compress the unused dimensions to serve as a
/// permutation_map for a vector transfer operation.
/// For example, given a linalg op such as:
///
/// ```
///   %0 = linalg.generic {
///        indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>,
///        indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)>
///      }
///     ins(%0 : tensor<2x3x4xf32>)
///    outs(%1 : tensor<5x6xf32>)
/// ```
///
/// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine
/// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second
/// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`.
static AffineMap reindexIndexingMap(AffineMap map) {
  assert(map.isProjectedPermutation() && "expected projected permutation");
  auto res = compressUnusedDims(map);
  assert(res.getNumDims() == res.getNumResults() &&
         "expected reindexed map with same number of dims and results");
  return res;
}

/// Helper data structure to represent the result of vectorization.
/// In certain specific cases, like terminators, we do not want to propagate
/// replacements.
enum VectorizationStatus {
  /// Op failed to vectorize.
  Failure = 0,
  /// Op vectorized and custom function took care of replacement logic.
  NoReplace,
  /// Op vectorized into a new Op whose results will replace original Op's
  /// results.
  NewOp
  // TODO: support values if Op vectorized to Many-Ops whose results we need to
  // aggregate for replacement.
};
struct VectorizationResult {
  /// Return status from vectorizing the current op.
  enum VectorizationStatus status = VectorizationStatus::Failure;
  /// New vectorized operation to replace the current op.
  /// Replacement behavior is specified by `status`.
  Operation *newOp;
};

/// Return a vector type of the same shape and element type as the (assumed)
/// ShapedType of `v`.
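/// For example (illustrative), a value of type `memref<4x8xf32>` or
/// `tensor<4x8xf32>` yields `vector<4x8xf32>`; a 0-d memref yields a null
/// VectorType so callers can fall back to a scalar load/store.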
static VectorType extractVectorTypeFromShapedValue(Value v) {
  auto st = v.getType().cast<ShapedType>();
  if (st.isa<MemRefType>() && st.getShape().empty())
    return VectorType();
  return VectorType::get(st.getShape(), st.getElementType());
}

/// Given an `outputOperand` of a LinalgOp, compute the intersection of the
/// forward slice starting from `outputOperand` and the backward slice
/// starting from the corresponding linalg.yield operand.
/// This intersection is assumed to have a single binary operation that is
/// the reduction operation. Multiple reduction operations would impose an
/// ordering between reduction dimensions and are currently unsupported in
/// Linalg. This limitation is motivated by the fact that e.g.
/// min(max(X)) != max(min(X)).
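/// For example (schematically), for a linalg.generic whose body is
///   %0 = addf %in, %out : f32
///   linalg.yield %0 : f32
/// the addf lies on both slices and is returned as the reduction operation.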
// TODO: use in LinalgOp verification, there is a circular dependency atm.
static Operation *getSingleBinaryOpAssumedReduction(OpOperand *outputOperand) {
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  auto yieldOp = cast<YieldOp>(linalgOp->getRegion(0).front().getTerminator());
  unsigned yieldNum =
      outputOperand->getOperandNumber() - linalgOp.getNumInputs();
  llvm::SetVector<Operation *> backwardSlice, forwardSlice;
  BlockArgument bbArg = linalgOp->getRegion(0).front().getArgument(
      outputOperand->getOperandNumber());
  Value yieldVal = yieldOp->getOperand(yieldNum);
  getBackwardSlice(yieldVal, &backwardSlice, [&](Operation *op) {
    return op->getParentOp() == linalgOp;
  });
  backwardSlice.insert(yieldVal.getDefiningOp());
  getForwardSlice(bbArg, &forwardSlice,
                  [&](Operation *op) { return op->getParentOp() == linalgOp; });
  // Search for the (assumed unique) elementwiseMappable op at the intersection
  // of forward and backward slices.
  Operation *reductionOp = nullptr;
  for (Operation *op : llvm::reverse(backwardSlice)) {
    if (!forwardSlice.contains(op))
      continue;
    if (OpTrait::hasElementwiseMappableTraits(op)) {
      if (reductionOp) {
        // Reduction detection fails: found more than 1 elementwise-mappable op.
        return nullptr;
      }
      reductionOp = op;
    }
  }
  // TODO: also assert no other subsequent ops break the reduction.
  return reductionOp;
}

/// If `value` of assumed VectorType has a shape different than `shape`, try to
/// build and return a new vector.broadcast to `shape`.
/// Otherwise, just return `value`.
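/// For example (illustrative IR), broadcasting a `vector<8xf32>` value to
/// shape `[4, 8]` produces:
///   %b = vector.broadcast %value : vector<8xf32> to vector<4x8xf32>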
// TODO: this is best effort atm and there is currently no guarantee of
// correctness for the broadcast semantics.
static Value broadcastIfNeeded(OpBuilder &b, Value value,
                               ArrayRef<int64_t> shape) {
  unsigned numDimsGtOne = std::count_if(shape.begin(), shape.end(),
                                        [](int64_t val) { return val > 1; });
  auto vecType = value.getType().dyn_cast<VectorType>();
  if (shape.empty() ||
      (vecType != nullptr &&
       (vecType.getShape() == shape || vecType.getRank() > numDimsGtOne)))
    return value;
  auto newVecType = VectorType::get(shape, vecType ? vecType.getElementType()
                                                   : value.getType());
  return b.create<vector::BroadcastOp>(b.getInsertionPoint()->getLoc(),
                                       newVecType, value);
}

static llvm::Optional<vector::CombiningKind>
getKindForOp(Operation *reductionOp) {
  if (!reductionOp)
    return llvm::None;
  return llvm::TypeSwitch<Operation *, llvm::Optional<vector::CombiningKind>>(
             reductionOp)
      .Case<AddIOp, AddFOp>([&](auto op) {
        return llvm::Optional<vector::CombiningKind>{
            vector::CombiningKind::ADD};
      })
      .Default([&](auto op) { return llvm::None; });
}

/// If `value` of assumed VectorType has a shape different than the shape of
/// `targetVectorType`, build and return a vector.multi_reduction of `value`
/// over the reduction dimensions of the enclosing LinalgOp.
/// Otherwise, just return `value`.
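/// For example (schematically), if the op iterates over a 4x8 space whose
/// second dimension is a reduction and the output has shape [4], a
/// vector<4x8xf32> value is reduced over dimension 1 into a vector<4xf32>
/// using the detected combining kind (currently only addition).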
static Value reduceIfNeeded(OpBuilder &b, VectorType targetVectorType,
                            Value value, OpOperand *outputOperand) {
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  assert(targetVectorType.getShape() == linalgOp.getShape(outputOperand));
  auto vecType = value.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getShape() == targetVectorType.getShape())
    return value;
  // At this point, we know we need to reduce. Detect the reduction operator.
  // TODO: Use the generic reduction detection util.
  Operation *reductionOp = getSingleBinaryOpAssumedReduction(outputOperand);
  unsigned pos = 0;
  MLIRContext *ctx = b.getContext();
  SmallVector<AffineExpr> exprs;
  for (auto s : linalgOp.iterator_types())
    if (isParallelIterator(s))
      exprs.push_back(getAffineDimExpr(pos++, ctx));
  auto loc = value.getLoc();
  // TODO: reuse common CombiningKind logic and support more than add.
  auto maybeKind = getKindForOp(reductionOp);
  assert(maybeKind && "Failed precondition: could not get reduction kind");
  unsigned idx = 0;
  SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false);
  for (auto attr : linalgOp.iterator_types()) {
    if (isReductionIteratorType(attr))
      reductionMask[idx] = true;
    ++idx;
  }
  return b.create<vector::MultiDimReductionOp>(loc, value, reductionMask,
                                               *maybeKind);
}

/// Build a vector.transfer_read of `source` at indices set to all `0`, using
/// `vectorType` as the result type and `map` as the permutation map.
/// Return the produced value.
static Value buildVectorRead(OpBuilder &b, Value source, VectorType vectorType,
                             AffineMap map) {
  Location loc = source.getLoc();
  auto shapedType = source.getType().cast<ShapedType>();
  SmallVector<Value> indices(shapedType.getRank(),
                             b.create<ConstantIndexOp>(loc, 0));
  return b.create<vector::TransferReadOp>(loc, vectorType, source, indices,
                                          map);
}

/// Build a vector.transfer_write of `value` into `outputOperand` at indices set
/// to all `0`, where `outputOperand` is an output operand of the LinalgOp
/// currently being vectorized. If the output operand has rank zero, build a
/// memref.store instead.
/// Return the produced value or null if no value is produced.
static Value buildVectorWrite(OpBuilder &b, Value value,
                              OpOperand *outputOperand) {
  Operation *write;
  Location loc = value.getLoc();
  if (VectorType vectorType =
          extractVectorTypeFromShapedValue(outputOperand->get())) {
    auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
    AffineMap map =
        reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand));
    SmallVector<Value> indices(linalgOp.getRank(outputOperand),
                               b.create<ConstantIndexOp>(loc, 0));
    value = broadcastIfNeeded(b, value, vectorType.getShape());
    value = reduceIfNeeded(b, vectorType, value, outputOperand);
    write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
                                              indices, map);
  } else {
    write = b.create<memref::StoreOp>(loc, value, outputOperand->get());
  }
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorized op: " << *write);
  if (!write->getResults().empty())
    return write->getResult(0);
  return Value();
}

// Custom vectorization function type. Produce a vector form of Operation*
// assuming all its vectorized operands are already in the BlockAndValueMapping.
// Return nullptr if the Operation cannot be vectorized.
using CustomVectorizationHook = std::function<VectorizationResult(
    Operation *, const BlockAndValueMapping &)>;

/// Helper function to vectorize the terminator of a `linalgOp`. New result
/// vector values are appended to `newResults`. Return
/// VectorizationStatus::NoReplace to signal the vectorization algorithm that it
/// should not try to map produced operations and instead return the results
/// using the `newResults` vector making them available to the
/// vectorization algorithm for RAUW. This function is meant to be used as a
/// CustomVectorizationHook.
static VectorizationResult
vectorizeLinalgYield(OpBuilder &b, Operation *op,
                     const BlockAndValueMapping &bvm, LinalgOp linalgOp,
                     SmallVectorImpl<Value> &newResults) {
  auto yieldOp = dyn_cast<linalg::YieldOp>(op);
  if (!yieldOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  for (auto outputs : llvm::enumerate(yieldOp.values())) {
    // TODO: Scan for an opportunity for reuse.
    // TODO: use a map.
    Value vectorValue = bvm.lookup(outputs.value());
    Value newResult = buildVectorWrite(
        b, vectorValue, linalgOp.getOutputOperand(outputs.index()));
    if (newResult)
      newResults.push_back(newResult);
  }
  return VectorizationResult{VectorizationStatus::NoReplace, nullptr};
}

/// Helper function to vectorize the index operations of a `linalgOp`. Return
/// VectorizationStatus::NewOp to signal the vectorization algorithm that it
/// should map the produced operations. This function is meant to be used as a
/// CustomVectorizationHook.
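/// For example (illustrative), for a `linalg.index` on dimension `d` of a
/// 4x8 iteration space:
///   - if `d` is the trailing dimension, the result is simply
///     `constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : vector<8xindex>`;
///   - otherwise, the 1-D index vector is broadcast to the permuted shape and
///     transposed back, yielding a `vector<4x8xindex>` of row indices.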
static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
                                                LinalgOp linalgOp) {
  IndexOp indexOp = dyn_cast<linalg::IndexOp>(op);
  if (!indexOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  auto loc = indexOp.getLoc();
  // Compute the static loop sizes of the index op.
  auto targetShape = linalgOp.computeStaticLoopSizes();
  // Compute a one-dimensional index vector for the index op dimension.
  SmallVector<int64_t> constantSeq =
      llvm::seq<int64_t>(0, targetShape[indexOp.dim()]).asSmallVector();
  ConstantOp constantOp =
      b.create<ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
  // Return the one-dimensional index vector if it lives in the trailing
  // dimension of the iteration space since the vectorization algorithm in this
  // case can handle the broadcast.
  if (indexOp.dim() == targetShape.size() - 1)
    return VectorizationResult{VectorizationStatus::NewOp, constantOp};
  // Otherwise permute the targetShape to move the index dimension last,
  // broadcast the one-dimensional index vector to the permuted shape, and
  // finally transpose the broadcasted index vector to undo the permutation.
  std::swap(targetShape[indexOp.dim()], targetShape.back());
  auto broadCastOp = b.create<vector::BroadcastOp>(
      loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
  SmallVector<int64_t> transposition =
      llvm::seq<int64_t>(0, linalgOp.getNumLoops()).asSmallVector();
  std::swap(transposition.back(), transposition[indexOp.dim()]);
  auto transposeOp =
      b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
  return VectorizationResult{VectorizationStatus::NewOp, transposeOp};
}

/// Generic vectorization for a single operation `op`, given already vectorized
/// operands carried by `bvm`. Vectorization occurs as follows:
///   1. Try to apply any of the `customVectorizationHooks` and return its
///   result on success.
///   2. Clone any constant in the current scope without vectorization: each
///   consumer of the constant will later determine the shape to which the
///   constant needs to be broadcast.
///   3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose
///   of the `customVectorizationHooks` to cover such cases.
///   4. Clone `op` in vector form to a vector of shape prescribed by the first
///   operand of maximal rank. Other operands have smaller rank and are
///   broadcast accordingly. It is assumed this broadcast is always legal,
///   otherwise, it means one of the `customVectorizationHooks` is incorrect.
///
/// This function assumes all operands of `op` have been vectorized and are in
/// the `bvm` mapping. As a consequence, this function is meant to be called on
/// a topologically-sorted list of ops.
/// This function does not update `bvm` but returns a VectorizationStatus that
/// instructs the caller what `bvm` update needs to occur.
static VectorizationResult
vectorizeOneOp(OpBuilder &b, Operation *op, const BlockAndValueMapping &bvm,
               ArrayRef<CustomVectorizationHook> customVectorizationHooks) {
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorize op " << *op);

  // 1. Try to apply any CustomVectorizationHook.
  if (!customVectorizationHooks.empty()) {
    for (auto &customFunc : customVectorizationHooks) {
      VectorizationResult result = customFunc(op, bvm);
      if (result.status == VectorizationStatus::Failure)
        continue;
      return result;
    }
  }

  // 2. Constant ops don't get vectorized but rather broadcasted at their users.
  // Clone so that the constant is not confined to the linalgOp block.
  if (isa<ConstantOp>(op))
    return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)};

  // 3. Only ElementwiseMappable are allowed in the generic vectorization.
  if (!OpTrait::hasElementwiseMappableTraits(op))
    return VectorizationResult{VectorizationStatus::Failure, nullptr};

  // 4. Generic vectorization path for ElementwiseMappable ops.
  //   a. first get the first max ranked shape.
  SmallVector<int64_t, 4> firstMaxRankedShape;
  for (Value operand : op->getOperands()) {
    auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>();
    if (vt && firstMaxRankedShape.size() < vt.getShape().size())
      firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end());
  }
  //   b. broadcast each op if needed.
  auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) {
    return firstMaxRankedShape.empty()
               ? bvm.lookup(v)
               : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape);
  });
  //   c. for elementwise, the result is the vector with the firstMaxRankedShape.
  auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) {
    return firstMaxRankedShape.empty()
               ? t
               : VectorType::get(firstMaxRankedShape, t);
  });

  // Build and return the new op.
  OperationState state(op->getLoc(), op->getName());
  state.addAttributes(op->getAttrs());
  state.addOperands(llvm::to_vector<4>(vectorizedOperands));
  state.addTypes(llvm::to_vector<4>(returnTypes));
  return VectorizationResult{VectorizationStatus::NewOp,
                             b.createOperation(state)};
}

/// Detect whether `r` has only ConstantOp, ElementwiseMappable, YieldOp and
/// IndexOp operations.
static bool hasOnlyScalarElementwiseOp(Region &r) {
  if (!llvm::hasSingleElement(r))
    return false;
  for (Operation &op : r.front()) {
    if (!(isa<ConstantOp, linalg::YieldOp, linalg::IndexOp>(op) ||
          OpTrait::hasElementwiseMappableTraits(&op)) ||
        llvm::any_of(op.getResultTypes(),
                     [](Type type) { return !type.isIntOrIndexOrFloat(); }))
      return false;
  }
  return true;
}

// Return true if the op is an element-wise linalg op.
static bool isElementwise(Operation *op) {
  auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
  if (!linalgOp)
    return false;
  if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
    return false;
  // TODO: relax the restrictions on indexing map.
  for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
    if (!linalgOp.getTiedIndexingMap(opOperand).isIdentity())
      return false;
  }
  if (linalgOp->getNumRegions() != 1)
    return false;
  return hasOnlyScalarElementwiseOp(linalgOp->getRegion(0));
}

/// Generic vectorization function that rewrites the body of a `linalgOp` into
/// vector form. Generic vectorization proceeds as follows:
///   1. Verify the `linalgOp` has one non-empty region.
///   2. Values defined above the region are mapped to themselves and will be
///   broadcasted on a per-need basis by their consumers.
///   3. Each region argument is vectorized into a vector.transfer_read (or 0-d
///   load).
///   TODO: Reuse opportunities for RAR dependencies.
///   4a. Register CustomVectorizationHook for YieldOp to capture the results.
///   4b. Register CustomVectorizationHook for IndexOp to access the iteration
///   indices.
///   5. Iteratively call vectorizeOneOp on the region operations.
///
/// When `broadcastToMaximalCommonShape` is set to true, eager broadcasting is
/// performed to the maximal common vector size implied by the `linalgOp`
/// iteration space. This eager broadcasting is introduced in the
/// permutation_map of the vector.transfer_read operations. The eager
/// broadcasting makes it trivial to determine where broadcasts, transposes and
/// reductions should occur, without any bookkeeping. The tradeoff is that, in
/// the absence of good canonicalizations, the amount of work increases.
/// This is not deemed a problem as we expect canonicalizations and foldings to
/// aggressively clean up the useless work.
LogicalResult vectorizeAsLinalgGeneric(
    OpBuilder &b, LinalgOp linalgOp, SmallVectorImpl<Value> &newResults,
    bool broadcastToMaximalCommonShape = false,
    ArrayRef<CustomVectorizationHook> customVectorizationHooks = {}) {
  // 1. Fail to vectorize if the operation does not have one non-empty region.
  if (linalgOp->getNumRegions() != 1 || linalgOp->getRegion(0).empty())
    return failure();
  auto &block = linalgOp->getRegion(0).front();

  // 2. Values defined above the region can only be broadcast for now. Make them
  // map to themselves.
  BlockAndValueMapping bvm;
  SetVector<Value> valuesSet;
  mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
  bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef());

  if (linalgOp.getNumOutputs() == 0)
    return failure();

  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes();

  // 3. Turn all BBArgs into vector.transfer_read / load.
  SmallVector<AffineMap> indexings;
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    BlockArgument bbarg = block.getArgument(opOperand->getOperandNumber());
    // TODO: 0-d vectors.
    if (linalgOp.getShape(opOperand).empty()) {
      Value loaded =
          b.create<memref::LoadOp>(linalgOp.getLoc(), opOperand->get());
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
                        << bbarg.getArgNumber() << "): " << loaded);
      bvm.map(bbarg, loaded);
      bvm.map(opOperand->get(), loaded);
      continue;
    }
    AffineMap map;
    VectorType vectorType;
    if (broadcastToMaximalCommonShape) {
      map = inverseAndBroadcastProjectedPermuation(
          linalgOp.getTiedIndexingMap(opOperand));
      vectorType = VectorType::get(
          commonVectorShape, getElementTypeOrSelf(opOperand->get().getType()));
    } else {
      map = inversePermutation(
          reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand)));
      vectorType =
          VectorType::get(map.compose(linalgOp.getShape(opOperand)),
                          getElementTypeOrSelf(opOperand->get().getType()));
    }
    Value vectorRead = buildVectorRead(b, opOperand->get(), vectorType, map);
    LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
                      << bbarg.getArgNumber() << "): " << vectorRead);
    bvm.map(bbarg, vectorRead);
    bvm.map(opOperand->get(), vectorRead);
  }

  auto hooks = llvm::to_vector<4>(customVectorizationHooks);
  // 4a. Register CustomVectorizationHook for yieldOp.
  CustomVectorizationHook vectorizeYield =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults);
  };
  hooks.push_back(vectorizeYield);

  // 4b. Register CustomVectorizationHook for indexOp.
  CustomVectorizationHook vectorizeIndex =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgIndex(b, op, linalgOp);
  };
  hooks.push_back(vectorizeIndex);

  // 5. Iteratively call `vectorizeOneOp` on each op in the block.
  for (Operation &op : block.getOperations()) {
    VectorizationResult result = vectorizeOneOp(b, &op, bvm, hooks);
    if (result.status == VectorizationStatus::Failure) {
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: failed to vectorize: " << op);
      return failure();
    }
    if (result.status == VectorizationStatus::NewOp) {
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vector op: "
                        << *result.newOp;);
      bvm.map(op.getResults(), result.newOp->getResults());
    }
  }

  return success();
}

static LogicalResult vectorizeContraction(OpBuilder &b, LinalgOp linalgOp,
                                          SmallVectorImpl<Value> &newResults) {
  assert(isaContractionOpInterface(linalgOp) &&
         "expected vectorizeContraction preconditions to be met");
  Location loc = linalgOp.getLoc();
  // Vectorize other ops as vector contraction.
  // TODO: interface.
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
                    << "Rewrite linalg op as vector.contract: ";
             linalgOp.dump());
  // Special function that describes how to vectorize the multiplication op in a
  // linalg contraction.
  CustomVectorizationHook vectorizeContraction =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    if (!isa<MulIOp, MulFOp>(op))
      return VectorizationResult{VectorizationStatus::Failure, nullptr};
    ArrayRef<int64_t> outShape =
        linalgOp.getShape(linalgOp.getOutputOperand(0));
    auto vType = outShape.empty()
                     ? op->getResult(0).getType()
                     : VectorType::get(outShape, op->getResult(0).getType());
    auto zero = b.create<ConstantOp>(loc, vType, b.getZeroAttr(vType));
    // Indexing maps at the time of vector.transfer_read are adjusted to order
    // vector dimensions in the same order as the canonical linalg op iteration
    // space order.
    // The indexings for the contraction therefore need to be adjusted.
    // TODO: consider dropping contraction special casing altogether, this will
    // require more advanced canonicalizations involving vector.multi_reduction
    // that are not yet available.
    SmallVector<AffineMap> indexingMaps;
    indexingMaps.reserve(linalgOp.getNumInputsAndOutputs());
    llvm::transform(linalgOp.getIndexingMaps(),
                    std::back_inserter(indexingMaps),
                    [](AffineMap indexingMap) {
                      return inversePermutation(reindexIndexingMap(indexingMap))
                          .compose(indexingMap);
                    });
    Operation *contract = b.create<vector::ContractionOp>(
        loc, bvm.lookup(op->getOperand(0)), bvm.lookup(op->getOperand(1)), zero,
        b.getAffineMapArrayAttr(indexingMaps), linalgOp.iterator_types());
    return VectorizationResult{VectorizationStatus::NewOp, contract};
  };
  return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
                                  /*broadcastToMaximalCommonShape=*/false,
                                  {vectorizeContraction});
}

static bool allIndexingsAreProjectedPermutation(LinalgOp op) {
  return llvm::all_of(op.getIndexingMaps(),
                      [](AffineMap m) { return m.isProjectedPermutation(); });
}

// TODO: probably need some extra checks for reduction followed by consumer
// ops that may not commute (e.g. linear reduction + non-linear instructions).
static LogicalResult reductionPreconditions(LinalgOp op) {
  if (llvm::none_of(op.iterator_types(), isReductionIteratorType))
    return failure();
  for (OpOperand *opOperand : op.getOutputOperands()) {
    Operation *reductionOp = getSingleBinaryOpAssumedReduction(opOperand);
    if (!getKindForOp(reductionOp))
      return failure();
  }
  return success();
}

LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
  auto linalgOp = cast<linalg::LinalgOp>(op);
  // All types must be static shape to go to vector.
  if (linalgOp.hasDynamicShape())
    return failure();
  if (isElementwise(op))
    return success();
  if (isaContractionOpInterface(linalgOp))
    return success();
  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  if (allIndexingsAreProjectedPermutation(linalgOp) &&
      succeeded(reductionPreconditions(linalgOp)))
    return success();
  return failure();
}

LogicalResult
mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
                                SmallVectorImpl<Value> &newResults) {
  if (failed(vectorizeLinalgOpPrecondition(op)))
    return failure();

  auto linalgOp = cast<LinalgOp>(op);
  if (isaContractionOpInterface(linalgOp))
    return vectorizeContraction(b, linalgOp, newResults);

  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
                    << "Vectorize linalg op as a generic by broadcasting to "
                       "maximal common shape: "
                    << *op);
  return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
                                  /*broadcastToMaximalCommonShape=*/true);
}

//----------------------------------------------------------------------------//
// Misc. vectorization patterns.
//----------------------------------------------------------------------------//

/// Given a block, return the Value that the block yields if that Value is
/// constant. In this context, "constant" means "defined outside of the block".
/// Should not be called on blocks that yield more than one value.
///
/// Values are considered constant in two cases:
///  - A basic block argument from a different block.
///  - A value defined outside of the block.
///
/// If the yielded value is not constant, an empty Value is returned.
static Value getConstantYieldValueFromBlock(Block &block) {
  auto yieldOp = cast<YieldOp>(block.getTerminator());
  assert(yieldOp.getNumOperands() == 1 && "expected single operand yield");
  Value result = yieldOp.values().front();
  Operation *definingOp = result.getDefiningOp();

  // Check if yield value is defined inside the block.
  if (definingOp && definingOp->getBlock() == &block)
    return Value();
  // Check if the yield value is a BB arg of the block.
  if (!definingOp && result.cast<BlockArgument>().getOwner() == &block)
    return Value();

  return result;
}

/// Rewrite a PadTensorOp into a sequence of InitTensorOp, TransferReadOp and
/// TransferWriteOp. For now, this only applies when all low and high paddings
/// are determined to be zero.
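/// Schematically (types elided), a zero-padding pad of a static tensor
/// becomes:
///   %init = linalg.init_tensor [...]
///   %read = vector.transfer_read %source[0, ..., 0], %padValue
///   vector.transfer_write %read, %init[0, ..., 0]
/// where %padValue is the constant value yielded by the pad region.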
struct GenericPadTensorOpVectorizationPattern
    : public OpRewritePattern<PadTensorOp> {
  using OpRewritePattern<PadTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadTensorOp padOp,
                                PatternRewriter &rewriter) const override {
    /// Given an OpFoldResult, return true if its value is guaranteed to be a
    /// zero integer.
    auto isZeroInt = [&](OpFoldResult ofr) {
      return isEqualConstantIntOrValue(ofr, rewriter.getIndexAttr(0));
    };
    // Low padding must be static 0.
    if (!llvm::all_of(padOp.getMixedLowPad(), isZeroInt))
      return failure();
    // High padding must be static 0.
    if (!llvm::all_of(padOp.getMixedHighPad(), isZeroInt))
      return failure();
    // Pad value must be a constant.
    auto padValue = getConstantYieldValueFromBlock(padOp.region().front());
    if (!padValue)
      return failure();

    // Bail on non-static shapes.
    auto resultShapedType = padOp.result().getType().cast<ShapedType>();
    if (!resultShapedType.hasStaticShape())
      return failure();
    VectorType vectorType = extractVectorTypeFromShapedValue(padOp.result());
    if (!vectorType)
      return failure();

    // Now we can rewrite as InitTensorOp + TransferReadOp@[0..0] +
    // TransferWriteOp@[0..0].
    SmallVector<Value> indices(
        resultShapedType.getRank(),
        rewriter.create<ConstantIndexOp>(padOp.getLoc(), 0));
    Value read = rewriter.create<vector::TransferReadOp>(
        padOp.getLoc(), vectorType, padOp.source(), indices, padValue);
    Value init = rewriter.create<InitTensorOp>(
        padOp.getLoc(), resultShapedType.getShape(),
        resultShapedType.getElementType());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(padOp, read, init,
                                                         indices);

    return success();
  }
};

void mlir::linalg::populatePadTensorOpVectorizationPatterns(
    RewritePatternSet &patterns, PatternBenefit baseBenefit) {
  patterns.add<GenericPadTensorOpVectorizationPattern>(
      patterns.getContext(), baseBenefit);
}

// TODO: cleanup all the convolution vectorization patterns.
template <class ConvOp, int N>
LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
    ConvOp op, PatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  MLIRContext *context = op.getContext();

  OpOperand *input = op.getInputOperand(0);
  OpOperand *kernel = op.getInputOperand(1);
  OpOperand *output = op.getOutputOperand(0);
  ArrayRef<int64_t> inShape = op.getShape(input);
  ArrayRef<int64_t> kShape = op.getShape(kernel);

  if (llvm::any_of(inShape, ShapedType::isDynamic) ||
      llvm::any_of(kShape, ShapedType::isDynamic))
    return failure();

  SmallVector<AffineExpr, 4> mapping;
  SmallVector<int64_t, 4> vectorDims;
  // Fail to apply when the size of a non-vectorized dimension is not 1.
  for (unsigned i = 0; i < N; i++) {
    if (!mask[i] && (inShape[i] != 1 || kShape[i] != 1))
      return failure();

    if (mask[i] && inShape[i] != kShape[i])
      return failure();

    if (mask[i]) {
      mapping.push_back(getAffineDimExpr(i, context));
      vectorDims.push_back(inShape[i]);
    }
  }

  int64_t rank = op.getRank(input);
  int64_t numDims = mapping.size();
  Type elemType = getElementTypeOrSelf(input->get().getType());

  auto map = AffineMap::get(rank, 0, mapping, context);
  SmallVector<Value, 4> zeros(rank, rewriter.create<ConstantIndexOp>(loc, 0));
  auto vecType = VectorType::get(vectorDims, elemType);

  auto inputVec = rewriter.create<vector::TransferReadOp>(
      loc, vecType, input->get(), zeros, map);
  auto kernelVec = rewriter.create<vector::TransferReadOp>(
      loc, vecType, kernel->get(), zeros, map);

  auto acc = rewriter.create<ConstantOp>(loc, elemType,
                                         rewriter.getZeroAttr(elemType));

  std::array<AffineMap, 3> indexingMaps{
      AffineMap::getMultiDimIdentityMap(numDims, context),
      AffineMap::getMultiDimIdentityMap(numDims, context),
      AffineMap::get(numDims, 0, {}, context)};

  std::vector<StringRef> iteratorTypes(numDims, "reduction");

  auto result = rewriter.create<vector::ContractionOp>(
      loc, inputVec, kernelVec, acc,
      rewriter.getAffineMapArrayAttr(indexingMaps),
      rewriter.getStrArrayAttr(iteratorTypes));

  rewriter.create<memref::StoreOp>(loc, result, output->get(),
                                   ValueRange(zeros));
  rewriter.eraseOp(op);
  return success();
}

using ConvOpConst = ConvOpVectorization<ConvWOp, 1>;

/// Inserts tiling, promotion and vectorization pattern for ConvOp
/// conversion into corresponding pattern lists.
template <typename ConvOp, unsigned N>
static void populateVectorizationPatterns(
    RewritePatternSet &tilingPatterns, RewritePatternSet &promotionPatterns,
    RewritePatternSet &vectorizationPatterns, ArrayRef<int64_t> tileSizes) {
  auto *context = tilingPatterns.getContext();
  if (tileSizes.size() < N)
    return;

  constexpr static StringRef kTiledMarker = "TILED";
  constexpr static StringRef kPromotedMarker = "PROMOTED";
  tilingPatterns.add<LinalgTilingPattern<ConvOp>>(
      context, LinalgTilingOptions().setTileSizes(tileSizes),
      LinalgTransformationFilter(ArrayRef<Identifier>{},
                                 Identifier::get(kTiledMarker, context)));

  promotionPatterns.add<LinalgPromotionPattern<ConvOp>>(
      context, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true),
      LinalgTransformationFilter(Identifier::get(kTiledMarker, context),
                                 Identifier::get(kPromotedMarker, context)));

  SmallVector<bool, 4> mask(N);
  int offset = tileSizes.size() - N;
  std::transform(tileSizes.begin() + offset, tileSizes.end(), mask.begin(),
                 [](int64_t i) -> bool { return i > 1; });

  vectorizationPatterns.add<ConvOpVectorization<ConvOp, N>>(context, mask);
}

void mlir::linalg::populateConvVectorizationPatterns(
    MLIRContext *context, SmallVectorImpl<RewritePatternSet> &patterns,
    ArrayRef<int64_t> tileSizes) {
  RewritePatternSet tiling(context);
  RewritePatternSet promotion(context);
  RewritePatternSet vectorization(context);
  populateVectorizationPatterns<ConvWOp, 1>(tiling, promotion, vectorization,
                                            tileSizes);

  populateVectorizationPatterns<ConvNWCOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);
  populateVectorizationPatterns<ConvInputNWCFilterWCFOp, 3>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvNCWOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);
  populateVectorizationPatterns<ConvInputNCWFilterWCFOp, 3>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvHWOp, 2>(tiling, promotion, vectorization,
                                             tileSizes);

  populateVectorizationPatterns<ConvNHWCOp, 4>(tiling, promotion, vectorization,
                                               tileSizes);
  populateVectorizationPatterns<ConvInputNHWCFilterHWCFOp, 4>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvNCHWOp, 4>(tiling, promotion, vectorization,
                                               tileSizes);
  populateVectorizationPatterns<ConvInputNCHWFilterHWCFOp, 4>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvDHWOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);

  populateVectorizationPatterns<ConvNDHWCOp, 5>(tiling, promotion,
                                                vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNDHWCFilterDHWCFOp, 5>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvNCDHWOp, 5>(tiling, promotion,
                                                vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNCDHWFilterDHWCFOp, 5>(
      tiling, promotion, vectorization, tileSizes);

  patterns.push_back(std::move(tiling));
  patterns.push_back(std::move(promotion));
  patterns.push_back(std::move(vectorization));
}

//----------------------------------------------------------------------------//
// Forwarding patterns
//----------------------------------------------------------------------------//

/// Check whether there is any interleaved use of any `values` between `firstOp`
/// and `secondOp`. Conservatively return `true` if any op or value is in a
/// different block.
static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp,
                                    ValueRange values) {
  if (firstOp->getBlock() != secondOp->getBlock() ||
      !firstOp->isBeforeInBlock(secondOp)) {
    LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                            << "interleavedUses precondition failed, firstOp: "
                            << *firstOp << ", second op: " << *secondOp);
    return true;
  }
  for (auto v : values) {
    for (auto &u : v.getUses()) {
      Operation *owner = u.getOwner();
      if (owner == firstOp || owner == secondOp)
        continue;
      // TODO: this is too conservative, use dominance info in the future.
      if (owner->getBlock() == firstOp->getBlock() &&
          (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner)))
        continue;
      LLVM_DEBUG(llvm::dbgs()
                 << "\n[" DEBUG_TYPE "]: "
                 << " found interleaved op " << *owner
                 << ", firstOp: " << *firstOp << ", second op: " << *secondOp);
      return true;
    }
  }
  return false;
}

/// Return the unique subview use of `v` if it is indeed unique, null otherwise.
static memref::SubViewOp getSubViewUseIfUnique(Value v) {
  memref::SubViewOp subViewOp;
  for (auto &u : v.getUses()) {
    if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) {
      if (subViewOp)
        return memref::SubViewOp();
      subViewOp = newSubViewOp;
    }
  }
  return subViewOp;
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
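/// Schematically, this pattern forwards a vector.transfer_read from the
/// subview destination of a linalg.copy (optionally preceded by a linalg.fill
/// that provides the padding value) so that the read happens directly from the
/// copy source; the now-redundant copy and fill are erased.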
LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
    vector::TransferReadOp xferOp, PatternRewriter &rewriter) const {

  // Transfer into `view`.
  Value viewOrAlloc = xferOp.source();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: " << viewOrAlloc);

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();
  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                          << "with subView " << subView);

  // Find the copy into `subView` without interleaved uses.
  CopyOp copyOp;
  for (auto &u : subView.getUses()) {
    if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
      assert(newCopyOp.output().getType().isa<MemRefType>());
      if (newCopyOp.output() != subView)
        continue;
      LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                              << "copy candidate " << *newCopyOp);
      if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();
  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                          << "with copy " << *copyOp);

  // Find the fill into `viewOrAlloc` without interleaved uses before the copy.
  FillOp maybeFillOp;
  for (auto &u : viewOrAlloc.getUses()) {
    if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) {
      assert(newFillOp.output().getType().isa<MemRefType>());
      if (newFillOp.output() != viewOrAlloc)
        continue;
      LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                              << "fill candidate " << *newFillOp);
      if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView}))
        continue;
      maybeFillOp = newFillOp;
      break;
    }
  }
  // Ensure padding matches.
  if (maybeFillOp && xferOp.padding() != maybeFillOp.value())
    return failure();
  if (maybeFillOp)
    LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                            << "with maybeFillOp " << *maybeFillOp);

  // `in` is the subview that linalg.copy reads. Replace it.
  Value in = copyOp.input();

  // linalg.copy + linalg.fill can be used to create a padded local buffer.
  // The `masked` attribute is only valid on this padded buffer.
  // When forwarding to vector.transfer_read, the attribute must be reset
  // conservatively.
  Value res = rewriter.create<vector::TransferReadOp>(
      xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(),
      xferOp.permutation_map(), xferOp.padding(), ArrayAttr());

  if (maybeFillOp)
    rewriter.eraseOp(maybeFillOp);
  rewriter.eraseOp(copyOp);
  rewriter.replaceOp(xferOp, res);

  return success();
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
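/// Schematically, this pattern forwards a vector.transfer_write that targets
/// the subview source of a subsequent linalg.copy so that the write happens
/// directly into the copy destination; the copy and the original transfer are
/// then erased.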
LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
    vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const {
  // Transfer into `viewOrAlloc`.
  Value viewOrAlloc = xferOp.source();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();

  // Find the copy from `subView` without interleaved uses.
  CopyOp copyOp;
  for (auto &u : subViewOp.getResult().getUses()) {
    if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
      if (newCopyOp.getInputOperand(0)->get() != subView)
        continue;
      if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();

  // `out` is the subview that the copy writes into; forward the transfer to
  // write into it directly.
  assert(copyOp.output().getType().isa<MemRefType>());
  Value out = copyOp.output();

  // Forward vector.transfer into copy.
  // linalg.copy + linalg.fill can be used to create a padded local buffer.
  // The `masked` attribute is only valid on this padded buffer.
  // When forwarding to vector.transfer_write, the attribute must be reset
  // conservatively.
  rewriter.create<vector::TransferWriteOp>(
      xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(),
      xferOp.permutation_map(), ArrayAttr());

  rewriter.eraseOp(copyOp);
  rewriter.eraseOp(xferOp);

  return success();
}