1 //===- Vectorization.cpp - Implementation of linalg Vectorization ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the linalg dialect Vectorization transformations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Analysis/SliceAnalysis.h"
14 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
15 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
16 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
17 #include "mlir/Dialect/Linalg/Utils/Utils.h"
18 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
19 #include "mlir/Dialect/Vector/VectorOps.h"
20 #include "mlir/IR/AffineExpr.h"
21 #include "mlir/IR/Matchers.h"
22 #include "mlir/IR/PatternMatch.h"
23 #include "mlir/Pass/Pass.h"
24 #include "mlir/Support/LLVM.h"
25 #include "mlir/Transforms/RegionUtils.h"
26 #include "llvm/ADT/ScopeExit.h"
27 #include "llvm/ADT/TypeSwitch.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <type_traits>
31 
32 using namespace mlir;
33 using namespace mlir::linalg;
34 
35 using llvm::dbgs;
36 
37 #define DEBUG_TYPE "linalg-vectorization"
38 
39 /// Return the unique instance of OpType in `block` if it is indeed unique.
/// Return null if none or more than one instance exists.
41 template <typename OpType>
42 static OpType getSingleOpOfType(Block &block) {
43   OpType res;
44   block.walk([&](OpType op) {
45     if (res) {
46       res = nullptr;
47       return WalkResult::interrupt();
48     }
49     res = op;
50     return WalkResult::advance();
51   });
52   return res;
53 }
54 
/// Given an indexing `map` coming from a LinalgOp indexing, restricted to be a
/// projected permutation, compress the unused dimensions to serve as a
57 /// permutation_map for a vector transfer operation.
58 /// For example, given a linalg op such as:
59 ///
60 /// ```
///   %0 = linalg.generic {
///          indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>,
///                           affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)>]
///        }
///       ins(%arg0 : tensor<2x3x4xf32>)
///      outs(%arg1 : tensor<5x6xf32>)
67 /// ```
68 ///
69 /// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine
70 /// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second
71 /// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`.
72 static AffineMap reindexIndexingMap(AffineMap map) {
73   assert(map.isProjectedPermutation() && "expected projected permutation");
74   auto res = compressUnusedDims(map);
75   assert(res.getNumDims() == res.getNumResults() &&
76          "expected reindexed map with same number of dims and results");
77   return res;
78 }
79 
80 /// Helper data structure to represent the result of vectorization.
/// In certain specific cases, like terminators, we do not want to propagate
/// replacements.
82 enum VectorizationStatus {
83   /// Op failed to vectorize.
84   Failure = 0,
  /// Op vectorized and custom function took care of replacement logic.
86   NoReplace,
87   /// Op vectorized into a new Op whose results will replace original Op's
88   /// results.
89   NewOp
90   // TODO: support values if Op vectorized to Many-Ops whose results we need to
91   // aggregate for replacement.
92 };
93 struct VectorizationResult {
94   /// Return status from vectorizing the current op.
95   enum VectorizationStatus status = VectorizationStatus::Failure;
96   /// New vectorized operation to replace the current op.
97   /// Replacement behavior is specified by `status`.
98   Operation *newOp;
99 };
100 
/// Return a vector type of the same shape and element type as the (assumed)
/// ShapedType of `v`. Return a null VectorType for a rank-0 memref.
103 static VectorType extractVectorTypeFromShapedValue(Value v) {
104   auto st = v.getType().cast<ShapedType>();
105   if (st.isa<MemRefType>() && st.getShape().empty())
106     return VectorType();
107   return VectorType::get(st.getShape(), st.getElementType());
108 }
109 
110 /// Given an `outputOperand` of a LinalgOp, compute the intersection of the
111 /// forward slice starting from `outputOperand` and the backward slice
112 /// starting from the corresponding linalg.yield operand.
113 /// This intersection is assumed to have a single binary operation that is
114 /// the reduction operation. Multiple reduction operations would impose an
/// ordering between reduction dimensions and are currently unsupported in
116 /// Linalg. This limitation is motivated by the fact that e.g.
117 /// min(max(X)) != max(min(X))
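///
/// For example (illustrative), in the following payload region of a linalg op
/// with one input and one output, the `addf` is the single elementwise op at
/// the intersection of both slices and is detected as the reduction:
///
/// ```
///   ^bb0(%in: f32, %out: f32):
///     %0 = addf %in, %out : f32
///     linalg.yield %0 : f32
/// ```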
118 // TODO: use in LinalgOp verification, there is a circular dependency atm.
119 static Operation *getSingleBinaryOpAssumedReduction(OpOperand *outputOperand) {
120   auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
121   auto yieldOp = cast<YieldOp>(linalgOp->getRegion(0).front().getTerminator());
122   unsigned yieldNum =
123       outputOperand->getOperandNumber() - linalgOp.getNumInputs();
124   llvm::SetVector<Operation *> backwardSlice, forwardSlice;
125   BlockArgument bbArg = linalgOp->getRegion(0).front().getArgument(
126       outputOperand->getOperandNumber());
127   Value yieldVal = yieldOp->getOperand(yieldNum);
128   getBackwardSlice(yieldVal, &backwardSlice, [&](Operation *op) {
129     return op->getParentOp() == linalgOp;
130   });
131   backwardSlice.insert(yieldVal.getDefiningOp());
132   getForwardSlice(bbArg, &forwardSlice,
133                   [&](Operation *op) { return op->getParentOp() == linalgOp; });
134   // Search for the (assumed unique) elementwiseMappable op at the intersection
135   // of forward and backward slices.
136   Operation *reductionOp = nullptr;
137   for (Operation *op : llvm::reverse(backwardSlice)) {
138     if (!forwardSlice.contains(op))
139       continue;
140     if (OpTrait::hasElementwiseMappableTraits(op)) {
141       if (reductionOp) {
142         // Reduction detection fails: found more than 1 elementwise-mappable op.
143         return nullptr;
144       }
145       reductionOp = op;
146     }
147   }
148   // TODO: also assert no other subsequent ops break the reduction.
149   return reductionOp;
150 }
151 
152 /// If `value` of assumed VectorType has a shape different than `shape`, try to
153 /// build and return a new vector.broadcast to `shape`.
154 /// Otherwise, just return `value`.
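///
/// For example (illustrative): broadcasting a `vector<8xf32>` value to shape
/// `[4, 8]` builds:
///
/// ```
///   %0 = vector.broadcast %value : vector<8xf32> to vector<4x8xf32>
/// ```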
155 // TODO: this is best effort atm and there is currently no guarantee of
156 // correctness for the broadcast semantics.
157 static Value broadcastIfNeeded(OpBuilder &b, Value value,
158                                ArrayRef<int64_t> shape) {
159   unsigned numDimsGtOne = std::count_if(shape.begin(), shape.end(),
160                                         [](int64_t val) { return val > 1; });
161   auto vecType = value.getType().dyn_cast<VectorType>();
162   if (shape.empty() ||
163       (vecType != nullptr &&
164        (vecType.getShape() == shape || vecType.getRank() > numDimsGtOne)))
165     return value;
166   auto newVecType = VectorType::get(shape, vecType ? vecType.getElementType()
167                                                    : value.getType());
168   return b.create<vector::BroadcastOp>(b.getInsertionPoint()->getLoc(),
169                                        newVecType, value);
170 }
171 
172 static llvm::Optional<vector::CombiningKind>
173 getKindForOp(Operation *reductionOp) {
174   if (!reductionOp)
175     return llvm::None;
176   return llvm::TypeSwitch<Operation *, llvm::Optional<vector::CombiningKind>>(
177              reductionOp)
178       .Case<AddIOp, AddFOp>([&](auto op) {
179         return llvm::Optional<vector::CombiningKind>{
180             vector::CombiningKind::ADD};
181       })
182       .Default([&](auto op) { return llvm::None; });
183 }
184 
/// If `value` of assumed VectorType has a shape different than the shape of
/// `targetVectorType`, build and return a vector.multi_reduction that reduces
/// `value` over the reduction dimensions of the enclosing LinalgOp.
/// Otherwise, just return `value`.
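///
/// For example (schematic): reducing a `vector<4x8xf32>` value to a
/// `targetVectorType` of `vector<4xf32>`, where dimension 1 of the iteration
/// space is the reduction dimension, builds roughly:
///
/// ```
///   %0 = vector.multi_reduction #vector.kind<add>, %value [1]
///       : vector<4x8xf32> to vector<4xf32>
/// ```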
188 static Value reduceIfNeeded(OpBuilder &b, VectorType targetVectorType,
189                             Value value, OpOperand *outputOperand) {
190   auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
191   assert(targetVectorType.getShape() == linalgOp.getShape(outputOperand));
192   auto vecType = value.getType().dyn_cast<VectorType>();
193   if (!vecType || vecType.getShape() == targetVectorType.getShape())
194     return value;
195   // At this point, we know we need to reduce. Detect the reduction operator.
196   // TODO: Use the generic reduction detection util.
197   Operation *reductionOp = getSingleBinaryOpAssumedReduction(outputOperand);
198   unsigned pos = 0;
199   MLIRContext *ctx = b.getContext();
200   SmallVector<AffineExpr> exprs;
201   for (auto s : linalgOp.iterator_types())
202     if (isParallelIterator(s))
203       exprs.push_back(getAffineDimExpr(pos++, ctx));
204   auto loc = value.getLoc();
  // TODO: reuse common CombiningKind logic and support more than add.
206   auto maybeKind = getKindForOp(reductionOp);
207   assert(maybeKind && "Failed precondition: could not get reduction kind");
208   unsigned idx = 0;
209   SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false);
210   for (auto attr : linalgOp.iterator_types()) {
211     if (isReductionIteratorType(attr))
212       reductionMask[idx] = true;
213     ++idx;
214   }
215   return b.create<vector::MultiDimReductionOp>(loc, value, reductionMask,
216                                                *maybeKind);
217 }
218 
219 /// Build a vector.transfer_read from `source` at indices set to all `0`.
/// If `source` has rank zero, build a memref.load.
221 /// Return the produced value.
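///
/// For example (illustrative): reading a `memref<4x8xf32>` source with an
/// identity permutation map builds (where `%source` is the shaped operand):
///
/// ```
///   %c0 = constant 0 : index
///   %cst = constant 0.000000e+00 : f32
///   %0 = vector.transfer_read %source[%c0, %c0], %cst
///       : memref<4x8xf32>, vector<4x8xf32>
/// ```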
222 static Value buildVectorRead(OpBuilder &b, Value source, VectorType vectorType,
223                              AffineMap map) {
224   Location loc = source.getLoc();
225   auto shapedType = source.getType().cast<ShapedType>();
226   SmallVector<Value> indices(shapedType.getRank(),
227                              b.create<ConstantIndexOp>(loc, 0));
228   return b.create<vector::TransferReadOp>(loc, vectorType, source, indices,
229                                           map);
230 }
231 
/// Build a vector.transfer_write of `value` into `outputOperand` at indices
/// set to all `0`, where `outputOperand` is an output operand of the LinalgOp
/// currently being vectorized. If the output has rank zero, build a
/// memref.store.
235 /// Return the produced value or null if no value is produced.
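///
/// For example (illustrative): writing a `vector<4x8xf32>` value into a
/// `memref<4x8xf32>` output operand builds (where `%output` is the operand):
///
/// ```
///   %c0 = constant 0 : index
///   vector.transfer_write %value, %output[%c0, %c0]
///       : vector<4x8xf32>, memref<4x8xf32>
/// ```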
236 static Value buildVectorWrite(OpBuilder &b, Value value,
237                               OpOperand *outputOperand) {
238   Operation *write;
239   Location loc = value.getLoc();
240   if (VectorType vectorType =
241           extractVectorTypeFromShapedValue(outputOperand->get())) {
242     auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
243     AffineMap map =
244         reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand));
245     SmallVector<Value> indices(linalgOp.getRank(outputOperand),
246                                b.create<ConstantIndexOp>(loc, 0));
247     value = broadcastIfNeeded(b, value, vectorType.getShape());
248     value = reduceIfNeeded(b, vectorType, value, outputOperand);
249     write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
250                                               indices, map);
251   } else {
252     write = b.create<memref::StoreOp>(loc, value, outputOperand->get());
253   }
254   LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorized op: " << *write);
255   if (!write->getResults().empty())
256     return write->getResult(0);
257   return Value();
258 }
259 
260 // Custom vectorization function type. Produce a vector form of Operation*
261 // assuming all its vectorized operands are already in the BlockAndValueMapping.
262 // Return nullptr if the Operation cannot be vectorized.
263 using CustomVectorizationHook = std::function<VectorizationResult(
264     Operation *, const BlockAndValueMapping &)>;
265 
266 /// Helper function to vectorize the terminator of a `linalgOp`. New result
267 /// vector values are appended to `newResults`. Return
268 /// VectorizationStatus::NoReplace to signal the vectorization algorithm that it
269 /// should not try to map produced operations and instead return the results
/// using the `newResults` vector, making them available to the
271 /// vectorization algorithm for RAUW. This function is meant to be used as a
272 /// CustomVectorizationHook.
273 static VectorizationResult
274 vectorizeLinalgYield(OpBuilder &b, Operation *op,
275                      const BlockAndValueMapping &bvm, LinalgOp linalgOp,
276                      SmallVectorImpl<Value> &newResults) {
277   auto yieldOp = dyn_cast<linalg::YieldOp>(op);
278   if (!yieldOp)
279     return VectorizationResult{VectorizationStatus::Failure, nullptr};
280   for (auto outputs : llvm::enumerate(yieldOp.values())) {
281     // TODO: Scan for an opportunity for reuse.
282     // TODO: use a map.
283     Value vectorValue = bvm.lookup(outputs.value());
284     Value newResult = buildVectorWrite(
285         b, vectorValue, linalgOp.getOutputOperand(outputs.index()));
286     if (newResult)
287       newResults.push_back(newResult);
288   }
289   return VectorizationResult{VectorizationStatus::NoReplace, nullptr};
290 }
291 
292 /// Helper function to vectorize the index operations of a `linalgOp`. Return
293 /// VectorizationStatus::NewOp to signal the vectorization algorithm that it
294 /// should map the produced operations. This function is meant to be used as a
295 /// CustomVectorizationHook.
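///
/// For example (illustrative): for a static `4x8` iteration space,
/// `linalg.index 0` (a non-trailing dimension) is vectorized into:
///
/// ```
///   %0 = constant dense<[0, 1, 2, 3]> : vector<4xindex>
///   %1 = vector.broadcast %0 : vector<4xindex> to vector<8x4xindex>
///   %2 = vector.transpose %1, [1, 0] : vector<8x4xindex> to vector<4x8xindex>
/// ```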
296 static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
297                                                 LinalgOp linalgOp) {
298   IndexOp indexOp = dyn_cast<linalg::IndexOp>(op);
299   if (!indexOp)
300     return VectorizationResult{VectorizationStatus::Failure, nullptr};
301   auto loc = indexOp.getLoc();
  // Compute the static loop sizes of the enclosing linalg op.
303   auto targetShape = linalgOp.computeStaticLoopSizes();
304   // Compute a one-dimensional index vector for the index op dimension.
305   SmallVector<int64_t> constantSeq(
306       llvm::seq<int64_t>(0, targetShape[indexOp.dim()]));
307   ConstantOp constantOp =
308       b.create<ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
309   // Return the one-dimensional index vector if it lives in the trailing
310   // dimension of the iteration space since the vectorization algorithm in this
311   // case can handle the broadcast.
312   if (indexOp.dim() == targetShape.size() - 1)
313     return VectorizationResult{VectorizationStatus::NewOp, constantOp};
314   // Otherwise permute the targetShape to move the index dimension last,
315   // broadcast the one-dimensional index vector to the permuted shape, and
316   // finally transpose the broadcasted index vector to undo the permutation.
317   std::swap(targetShape[indexOp.dim()], targetShape.back());
318   auto broadCastOp = b.create<vector::BroadcastOp>(
319       loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
320   SmallVector<int64_t> transposition(
321       llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
322   std::swap(transposition.back(), transposition[indexOp.dim()]);
323   auto transposeOp =
324       b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
325   return VectorizationResult{VectorizationStatus::NewOp, transposeOp};
326 }
327 
328 /// Generic vectorization for a single operation `op`, given already vectorized
329 /// operands carried by `bvm`. Vectorization occurs as follows:
330 ///   1. Try to apply any of the `customVectorizationHooks` and return its
331 ///   result on success.
332 ///   2. Clone any constant in the current scope without vectorization: each
///   consumer of the constant will later determine the shape to which the
///   constant needs to be broadcast.
335 ///   3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose
336 ///   of the `customVectorizationHooks` to cover such cases.
337 ///   4. Clone `op` in vector form to a vector of shape prescribed by the first
338 ///   operand of maximal rank. Other operands have smaller rank and are
///   broadcast accordingly. This broadcast is assumed to always be legal;
///   otherwise one of the `customVectorizationHooks` is incorrect.
341 ///
342 /// This function assumes all operands of `op` have been vectorized and are in
343 /// the `bvm` mapping. As a consequence, this function is meant to be called on
344 /// a topologically-sorted list of ops.
345 /// This function does not update `bvm` but returns a VectorizationStatus that
346 /// instructs the caller what `bvm` update needs to occur.
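///
/// For example (illustrative): with both operands already mapped to
/// `vector<4x8xf32>` values in `bvm`, the scalar op:
///
/// ```
///   %0 = addf %a, %b : f32
/// ```
///
/// is cloned in vector form as:
///
/// ```
///   %1 = addf %va, %vb : vector<4x8xf32>
/// ```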
347 static VectorizationResult
348 vectorizeOneOp(OpBuilder &b, Operation *op, const BlockAndValueMapping &bvm,
349                ArrayRef<CustomVectorizationHook> customVectorizationHooks) {
350   LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorize op " << *op);
351 
352   // 1. Try to apply any CustomVectorizationHook.
353   if (!customVectorizationHooks.empty()) {
354     for (auto &customFunc : customVectorizationHooks) {
355       VectorizationResult result = customFunc(op, bvm);
356       if (result.status == VectorizationStatus::Failure)
357         continue;
358       return result;
359     }
360   }
361 
  // 2. Constant ops don't get vectorized but rather broadcast at their users.
  // Clone them so that the constant is not confined to the linalgOp block.
364   if (isa<ConstantOp>(op))
365     return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)};
366 
367   // 3. Only ElementwiseMappable are allowed in the generic vectorization.
368   if (!OpTrait::hasElementwiseMappableTraits(op))
369     return VectorizationResult{VectorizationStatus::Failure, nullptr};
370 
371   // 4. Generic vectorization path for ElementwiseMappable ops.
  //   a. Get the shape of the first operand of maximal rank (`firstMaxRankedShape`).
373   SmallVector<int64_t, 4> firstMaxRankedShape;
374   for (Value operand : op->getOperands()) {
375     auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>();
376     if (vt && firstMaxRankedShape.size() < vt.getShape().size())
377       firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end());
378   }
  //   b. Broadcast each operand to `firstMaxRankedShape` if needed.
380   auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) {
381     return firstMaxRankedShape.empty()
382                ? bvm.lookup(v)
383                : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape);
384   });
  //   c. For elementwise ops, the result types are vectors of `firstMaxRankedShape`.
386   auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) {
387     return firstMaxRankedShape.empty()
388                ? t
389                : VectorType::get(firstMaxRankedShape, t);
390   });
391 
392   // Build and return the new op.
393   OperationState state(op->getLoc(), op->getName());
394   state.addAttributes(op->getAttrs());
395   state.addOperands(llvm::to_vector<4>(vectorizedOperands));
396   state.addTypes(llvm::to_vector<4>(returnTypes));
397   return VectorizationResult{VectorizationStatus::NewOp,
398                              b.createOperation(state)};
399 }
400 
401 /// Detect whether `r` has only ConstantOp, ElementwiseMappable and YieldOp.
402 static bool hasOnlyScalarElementwiseOp(Region &r) {
403   if (!llvm::hasSingleElement(r))
404     return false;
405   for (Operation &op : r.front()) {
406     if (!(isa<ConstantOp, linalg::YieldOp, linalg::IndexOp>(op) ||
407           OpTrait::hasElementwiseMappableTraits(&op)) ||
408         llvm::any_of(op.getResultTypes(),
409                      [](Type type) { return !type.isIntOrIndexOrFloat(); }))
410       return false;
411   }
412   return true;
413 }
414 
/// Return true if the op is an elementwise linalg op.
416 static bool isElementwise(Operation *op) {
417   auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
418   if (!linalgOp)
419     return false;
420   if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
421     return false;
422   // TODO: relax the restrictions on indexing map.
423   for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
424     if (!linalgOp.getTiedIndexingMap(opOperand).isIdentity())
425       return false;
426   }
427   if (linalgOp->getNumRegions() != 1)
428     return false;
429   return hasOnlyScalarElementwiseOp(linalgOp->getRegion(0));
430 }
431 
432 /// Generic vectorization function that rewrites the body of a `linalgOp` into
433 /// vector form. Generic vectorization proceeds as follows:
434 ///   1. Verify the `linalgOp` has one non-empty region.
435 ///   2. Values defined above the region are mapped to themselves and will be
///   broadcast on an as-needed basis by their consumers.
437 ///   3. Each region argument is vectorized into a vector.transfer_read (or 0-d
438 ///   load).
439 ///   TODO: Reuse opportunities for RAR dependencies.
440 ///   4a. Register CustomVectorizationHook for YieldOp to capture the results.
441 ///   4b. Register CustomVectorizationHook for IndexOp to access the iteration
442 ///   indices.
443 ///   5. Iteratively call vectorizeOneOp on the region operations.
444 ///
445 /// When `broadcastToMaximalCommonShape` is set to true, eager broadcasting is
446 /// performed to the maximal common vector size implied by the `linalgOp`
447 /// iteration space. This eager broadcasting is introduced in the
448 /// permutation_map of the vector.transfer_read operations. The eager
/// broadcasting makes it trivial to determine where broadcasts, transposes and
450 /// reductions should occur, without any bookkeeping. The tradeoff is that, in
451 /// the absence of good canonicalizations, the amount of work increases.
452 /// This is not deemed a problem as we expect canonicalizations and foldings to
453 /// aggressively clean up the useless work.
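///
/// For example (illustrative), an elementwise addition written as a
/// `linalg.generic` over two `memref<4x8xf32>` inputs and one
/// `memref<4x8xf32>` output is rewritten roughly into:
///
/// ```
///   %c0 = constant 0 : index
///   %cst = constant 0.000000e+00 : f32
///   %0 = vector.transfer_read %arg0[%c0, %c0], %cst
///       : memref<4x8xf32>, vector<4x8xf32>
///   %1 = vector.transfer_read %arg1[%c0, %c0], %cst
///       : memref<4x8xf32>, vector<4x8xf32>
///   %2 = addf %0, %1 : vector<4x8xf32>
///   vector.transfer_write %2, %arg2[%c0, %c0]
///       : vector<4x8xf32>, memref<4x8xf32>
/// ```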
454 LogicalResult vectorizeAsLinalgGeneric(
455     OpBuilder &b, LinalgOp linalgOp, SmallVectorImpl<Value> &newResults,
456     bool broadcastToMaximalCommonShape = false,
457     ArrayRef<CustomVectorizationHook> customVectorizationHooks = {}) {
458   // 1. Fail to vectorize if the operation does not have one non-empty region.
459   if (linalgOp->getNumRegions() != 1 || linalgOp->getRegion(0).empty())
460     return failure();
461   auto &block = linalgOp->getRegion(0).front();
462 
463   // 2. Values defined above the region can only be broadcast for now. Make them
464   // map to themselves.
465   BlockAndValueMapping bvm;
466   SetVector<Value> valuesSet;
467   mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
468   bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef());
469 
470   if (linalgOp.getNumOutputs() == 0)
471     return failure();
472 
473   // TODO: the common vector shape is equal to the static loop sizes only when
474   // all indexing maps are projected permutations. For convs and stencils the
475   // logic will need to evolve.
476   SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes();
477 
478   // 3. Turn all BBArgs into vector.transfer_read / load.
479   SmallVector<AffineMap> indexings;
480   for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
481     BlockArgument bbarg = block.getArgument(opOperand->getOperandNumber());
482     // TODO: 0-d vectors.
483     if (linalgOp.getShape(opOperand).empty()) {
484       Value loaded =
485           b.create<memref::LoadOp>(linalgOp.getLoc(), opOperand->get());
486       LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
487                         << bbarg.getArgNumber() << "): " << loaded);
488       bvm.map(bbarg, loaded);
489       bvm.map(opOperand->get(), loaded);
490       continue;
491     }
492     AffineMap map;
493     VectorType vectorType;
494     if (broadcastToMaximalCommonShape) {
495       map = inverseAndBroadcastProjectedPermuation(
496           linalgOp.getTiedIndexingMap(opOperand));
497       vectorType = VectorType::get(
498           commonVectorShape, getElementTypeOrSelf(opOperand->get().getType()));
499     } else {
500       map = inversePermutation(
501           reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand)));
502       vectorType =
503           VectorType::get(map.compose(linalgOp.getShape(opOperand)),
504                           getElementTypeOrSelf(opOperand->get().getType()));
505     }
506     Value vectorRead = buildVectorRead(b, opOperand->get(), vectorType, map);
507     LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
508                       << bbarg.getArgNumber() << "): " << vectorRead);
509     bvm.map(bbarg, vectorRead);
510     bvm.map(opOperand->get(), vectorRead);
511   }
512 
513   auto hooks = llvm::to_vector<4>(customVectorizationHooks);
514   // 4a. Register CustomVectorizationHook for yieldOp.
515   CustomVectorizationHook vectorizeYield =
516       [&](Operation *op,
517           const BlockAndValueMapping &bvm) -> VectorizationResult {
518     return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults);
519   };
520   hooks.push_back(vectorizeYield);
521 
522   // 4b. Register CustomVectorizationHook for indexOp.
523   CustomVectorizationHook vectorizeIndex =
524       [&](Operation *op,
525           const BlockAndValueMapping &bvm) -> VectorizationResult {
526     return vectorizeLinalgIndex(b, op, linalgOp);
527   };
528   hooks.push_back(vectorizeIndex);
529 
  // 5. Iteratively call `vectorizeOneOp` on each op in the block.
531   for (Operation &op : block.getOperations()) {
532     VectorizationResult result = vectorizeOneOp(b, &op, bvm, hooks);
533     if (result.status == VectorizationStatus::Failure) {
534       LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: failed to vectorize: " << op);
535       return failure();
536     }
537     if (result.status == VectorizationStatus::NewOp) {
538       LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vector op: "
539                         << *result.newOp;);
540       bvm.map(op.getResults(), result.newOp->getResults());
541     }
542   }
543 
544   return success();
545 }
546 
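/// Vectorize a LinalgOp implementing the ContractionOpInterface by rewriting
/// its scalar multiply-accumulate body into a single vector.contract fed by
/// the vectorized operands and a zero accumulator. For example (schematic), a
/// `linalg.matmul` on 4x8 and 8x16 operands ends up producing roughly:
///
/// ```
///   %0 = vector.contract {
///          indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
///                           affine_map<(m, n, k) -> (k, n)>,
///                           affine_map<(m, n, k) -> (m, n)>],
///          iterator_types = ["parallel", "parallel", "reduction"]}
///        %lhs, %rhs, %zero
///     : vector<4x8xf32>, vector<8x16xf32> into vector<4x16xf32>
/// ```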
547 static LogicalResult vectorizeContraction(OpBuilder &b, LinalgOp linalgOp,
548                                           SmallVectorImpl<Value> &newResults) {
549   assert(isaContractionOpInterface(linalgOp) &&
550          "expected vectorizeContraction preconditions to be met");
551   Location loc = linalgOp.getLoc();
552   // Vectorize other ops as vector contraction.
553   // TODO: interface.
554   LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
555                     << "Rewrite linalg op as vector.contract: ";
556              linalgOp.dump());
557   // Special function that describes how to vectorize the multiplication op in a
558   // linalg contraction.
559   CustomVectorizationHook vectorizeContraction =
560       [&](Operation *op,
561           const BlockAndValueMapping &bvm) -> VectorizationResult {
562     if (!isa<MulIOp, MulFOp>(op))
563       return VectorizationResult{VectorizationStatus::Failure, nullptr};
564     ArrayRef<int64_t> outShape =
565         linalgOp.getShape(linalgOp.getOutputOperand(0));
566     auto vType = outShape.empty()
567                      ? op->getResult(0).getType()
568                      : VectorType::get(outShape, op->getResult(0).getType());
569     auto zero = b.create<ConstantOp>(loc, vType, b.getZeroAttr(vType));
570     // Indexing maps at the time of vector.transfer_read are adjusted to order
571     // vector dimensions in the same order as the canonical linalg op iteration
572     // space order.
573     // The indexings for the contraction therefore need to be adjusted.
574     // TODO: consider dropping contraction special casing altogether, this will
575     // require more advanced canonicalizations involving vector.multi_reduction
576     // that are not yet available.
577     SmallVector<AffineMap> indexingMaps;
578     indexingMaps.reserve(linalgOp.getNumInputsAndOutputs());
579     llvm::transform(linalgOp.getIndexingMaps(),
580                     std::back_inserter(indexingMaps),
581                     [](AffineMap indexingMap) {
582                       return inversePermutation(reindexIndexingMap(indexingMap))
583                           .compose(indexingMap);
584                     });
585     Operation *contract = b.create<vector::ContractionOp>(
586         loc, bvm.lookup(op->getOperand(0)), bvm.lookup(op->getOperand(1)), zero,
587         b.getAffineMapArrayAttr(indexingMaps), linalgOp.iterator_types());
588     return VectorizationResult{VectorizationStatus::NewOp, contract};
589   };
590   return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
591                                   /*broadcastToMaximalCommonShape=*/false,
592                                   {vectorizeContraction});
593 }
594 
595 static bool allIndexingsAreProjectedPermutation(LinalgOp op) {
596   return llvm::all_of(op.getIndexingMaps(),
597                       [](AffineMap m) { return m.isProjectedPermutation(); });
598 }
599 
600 // TODO: probably need some extra checks for reduction followed by consumer
601 // ops that may not commute (e.g. linear reduction + non-linear instructions).
602 static LogicalResult reductionPreconditions(LinalgOp op) {
603   if (llvm::none_of(op.iterator_types(), isReductionIteratorType))
604     return failure();
605   for (OpOperand *opOperand : op.getOutputOperands()) {
606     Operation *reductionOp = getSingleBinaryOpAssumedReduction(opOperand);
607     if (!getKindForOp(reductionOp))
608       return failure();
609   }
610   return success();
611 }
612 
613 LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
614   auto linalgOp = cast<linalg::LinalgOp>(op);
615   // All types must be static shape to go to vector.
616   if (linalgOp.hasDynamicShape())
617     return failure();
618   if (isElementwise(op))
619     return success();
620   if (isaContractionOpInterface(linalgOp))
621     return success();
622   // TODO: the common vector shape is equal to the static loop sizes only when
623   // all indexing maps are projected permutations. For convs and stencils the
624   // logic will need to evolve.
625   if (allIndexingsAreProjectedPermutation(linalgOp) &&
626       succeeded(reductionPreconditions(linalgOp)))
627     return success();
628   return failure();
629 }
630 
631 LogicalResult
632 mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
633                                 SmallVectorImpl<Value> &newResults) {
634   if (failed(vectorizeLinalgOpPrecondition(op)))
635     return failure();
636 
637   auto linalgOp = cast<LinalgOp>(op);
638   if (isaContractionOpInterface(linalgOp))
639     return vectorizeContraction(b, linalgOp, newResults);
640 
641   LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
642                     << "Vectorize linalg op as a generic by broadcasting to "
643                        "maximal common shape: "
644                     << *op);
645   return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
646                                   /*broadcastToMaximalCommonShape=*/true);
647 }
648 
649 //----------------------------------------------------------------------------//
650 // Misc. vectorization patterns.
651 //----------------------------------------------------------------------------//
652 
653 /// Rewrite a PadTensorOp into a sequence of InitTensorOp, TransferReadOp and
654 /// TransferWriteOp. For now, this only applies when all low and high paddings
655 /// are determined to be zero.
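///
/// For example (illustrative), a zero-padding `linalg.pad_tensor` of a
/// `tensor<4x8xf32>` source is rewritten roughly into (`%pad` is the value
/// yielded by the pad op's region):
///
/// ```
///   %c0 = constant 0 : index
///   %init = linalg.init_tensor [4, 8] : tensor<4x8xf32>
///   %read = vector.transfer_read %source[%c0, %c0], %pad
///       : tensor<4x8xf32>, vector<4x8xf32>
///   %result = vector.transfer_write %read, %init[%c0, %c0]
///       : vector<4x8xf32>, tensor<4x8xf32>
/// ```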
656 LogicalResult PadTensorOpVectorizationPattern::matchAndRewrite(
657     linalg::PadTensorOp padOp, PatternRewriter &rewriter) const {
658   // Helper function to determine whether an OpFoldResult is not a zero Index.
659   auto isNotZeroIndex = [](OpFoldResult ofr) {
660     if (Attribute attr = ofr.dyn_cast<Attribute>())
661       return attr.cast<IntegerAttr>().getInt() != 0;
662     Value v = ofr.get<Value>();
663     if (auto constOp = v.getDefiningOp<ConstantOp>())
664       if (auto intAttr = constOp.getValue().dyn_cast<IntegerAttr>())
665         return intAttr.getValue().getSExtValue() != 0;
666     return true;
667   };
668 
669   auto resultShapedType = padOp.result().getType().cast<ShapedType>();
670   // Bail on non-static shapes.
671   if (!resultShapedType.hasStaticShape())
672     return failure();
673 
  // If any pad_low is not a static 0, a mask is needed. Bail for now.
675   if (llvm::any_of(padOp.getMixedLowPad(), isNotZeroIndex))
676     return failure();
677   VectorType vectorType = extractVectorTypeFromShapedValue(padOp.result());
678   if (!vectorType)
679     return failure();
680 
  // For now, only support a padding value defined outside the pad op's block, i.e. either:
682   //   1. A BBarg from a different block.
683   //   2. A value defined outside of the current block.
684   Block &block = padOp.region().front();
685   auto yieldOp = cast<YieldOp>(block.getTerminator());
686   assert(yieldOp.getNumOperands() == 1 && "expected single operand yield");
687   Value padValue = yieldOp.values().front();
688   Operation *definingOp = padValue.getDefiningOp();
689   if (definingOp && definingOp->getBlock() == &block)
690     return failure();
691   if (!definingOp && padValue.cast<BlockArgument>().getOwner() == &block)
692     return failure();
693 
  // TODO: if any pad_high is not a static 0, a mask is needed. For now, just bail.
695   if (llvm::any_of(padOp.getMixedHighPad(),
696                    [&](OpFoldResult ofr) { return isNotZeroIndex(ofr); }))
697     return failure();
698 
699   // Now we can rewrite as InitTensorOp + TransferReadOp@[0..0] +
700   // TransferWriteOp@[0..0].
701   SmallVector<Value> indices(
702       resultShapedType.getRank(),
703       rewriter.create<ConstantIndexOp>(padOp.getLoc(), 0));
704   Value read = rewriter.create<vector::TransferReadOp>(
705       padOp.getLoc(), vectorType, padOp.source(), indices, padValue);
706   Value init =
707       rewriter.create<InitTensorOp>(padOp.getLoc(), resultShapedType.getShape(),
708                                     resultShapedType.getElementType());
709   rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(padOp, read, init,
710                                                        indices);
711 
712   return success();
713 }
714 
715 // TODO: cleanup all the convolution vectorization patterns.
716 template <class ConvOp, int N>
717 LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
718     ConvOp op, PatternRewriter &rewriter) const {
719   Location loc = op.getLoc();
720   MLIRContext *context = op.getContext();
721 
722   OpOperand *input = op.getInputOperand(0);
723   OpOperand *kernel = op.getInputOperand(1);
724   OpOperand *output = op.getOutputOperand(0);
725   ArrayRef<int64_t> inShape = op.getShape(input);
726   ArrayRef<int64_t> kShape = op.getShape(kernel);
727 
728   if (llvm::any_of(inShape, ShapedType::isDynamic) ||
729       llvm::any_of(kShape, ShapedType::isDynamic))
730     return failure();
731 
732   SmallVector<AffineExpr, 4> mapping;
733   SmallVector<int64_t, 4> vectorDims;
  // Fail to apply when the size of a non-vectorized dimension is not 1.
735   for (unsigned i = 0; i < N; i++) {
736     if (!mask[i] && (inShape[i] != 1 || kShape[i] != 1))
737       return failure();
738 
739     if (mask[i] && inShape[i] != kShape[i])
740       return failure();
741 
742     if (mask[i]) {
743       mapping.push_back(getAffineDimExpr(i, context));
744       vectorDims.push_back(inShape[i]);
745     }
746   }
747 
748   int64_t rank = op.getRank(input);
749   int64_t numDims = mapping.size();
750   Type elemType = getElementTypeOrSelf(input->get().getType());
751 
752   auto map = AffineMap::get(rank, 0, mapping, context);
753   SmallVector<Value, 4> zeros(rank, rewriter.create<ConstantIndexOp>(loc, 0));
754   auto vecType = VectorType::get(vectorDims, elemType);
755 
756   auto inputVec = rewriter.create<vector::TransferReadOp>(
757       loc, vecType, input->get(), zeros, map);
758   auto kernelVec = rewriter.create<vector::TransferReadOp>(
759       loc, vecType, kernel->get(), zeros, map);
760 
761   auto acc = rewriter.create<ConstantOp>(loc, elemType,
762                                          rewriter.getZeroAttr(elemType));
763 
764   std::array<AffineMap, 3> indexingMaps{
765       AffineMap::getMultiDimIdentityMap(numDims, context),
766       AffineMap::getMultiDimIdentityMap(numDims, context),
767       AffineMap::get(numDims, 0, {}, context)};
768 
769   std::vector<StringRef> iteratorTypes(numDims, "reduction");
770 
771   auto result = rewriter.create<vector::ContractionOp>(
772       loc, inputVec, kernelVec, acc,
773       rewriter.getAffineMapArrayAttr(indexingMaps),
774       rewriter.getStrArrayAttr(iteratorTypes));
775 
776   rewriter.create<memref::StoreOp>(loc, result, output->get(),
777                                    ValueRange(zeros));
778   rewriter.eraseOp(op);
779   return success();
780 }
781 
782 using ConvOpConst = ConvOpVectorization<ConvWOp, 1>;
783 
/// Inserts tiling, promotion and vectorization patterns for ConvOp
/// conversion into the corresponding pattern lists.
786 template <typename ConvOp, unsigned N>
787 static void populateVectorizationPatterns(
788     RewritePatternSet &tilingPatterns, RewritePatternSet &promotionPatterns,
789     RewritePatternSet &vectorizationPatterns, ArrayRef<int64_t> tileSizes) {
790   auto *context = tilingPatterns.getContext();
791   if (tileSizes.size() < N)
792     return;
793 
794   constexpr static StringRef kTiledMarker = "TILED";
795   constexpr static StringRef kPromotedMarker = "PROMOTED";
796   tilingPatterns.add<LinalgTilingPattern<ConvOp>>(
797       context, LinalgTilingOptions().setTileSizes(tileSizes),
798       LinalgTransformationFilter(ArrayRef<Identifier>{},
799                                  Identifier::get(kTiledMarker, context)));
800 
801   promotionPatterns.add<LinalgPromotionPattern<ConvOp>>(
802       context, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true),
803       LinalgTransformationFilter(Identifier::get(kTiledMarker, context),
804                                  Identifier::get(kPromotedMarker, context)));
805 
806   SmallVector<bool, 4> mask(N);
807   int offset = tileSizes.size() - N;
808   std::transform(tileSizes.begin() + offset, tileSizes.end(), mask.begin(),
809                  [](int64_t i) -> bool { return i > 1; });
810 
811   vectorizationPatterns.add<ConvOpVectorization<ConvOp, N>>(context, mask);
812 }
813 
814 void mlir::linalg::populateConvVectorizationPatterns(
815     MLIRContext *context, SmallVectorImpl<RewritePatternSet> &patterns,
816     ArrayRef<int64_t> tileSizes) {
817   RewritePatternSet tiling(context);
818   RewritePatternSet promotion(context);
819   RewritePatternSet vectorization(context);
820   populateVectorizationPatterns<ConvWOp, 1>(tiling, promotion, vectorization,
821                                             tileSizes);
822 
823   populateVectorizationPatterns<ConvNWCOp, 3>(tiling, promotion, vectorization,
824                                               tileSizes);
825   populateVectorizationPatterns<ConvInputNWCFilterWCFOp, 3>(
826       tiling, promotion, vectorization, tileSizes);
827 
828   populateVectorizationPatterns<ConvNCWOp, 3>(tiling, promotion, vectorization,
829                                               tileSizes);
830   populateVectorizationPatterns<ConvInputNCWFilterWCFOp, 3>(
831       tiling, promotion, vectorization, tileSizes);
832 
833   populateVectorizationPatterns<ConvHWOp, 2>(tiling, promotion, vectorization,
834                                              tileSizes);
835 
836   populateVectorizationPatterns<ConvNHWCOp, 4>(tiling, promotion, vectorization,
837                                                tileSizes);
838   populateVectorizationPatterns<ConvInputNHWCFilterHWCFOp, 4>(
839       tiling, promotion, vectorization, tileSizes);
840 
841   populateVectorizationPatterns<ConvNCHWOp, 4>(tiling, promotion, vectorization,
842                                                tileSizes);
843   populateVectorizationPatterns<ConvInputNCHWFilterHWCFOp, 4>(
844       tiling, promotion, vectorization, tileSizes);
845 
846   populateVectorizationPatterns<ConvDHWOp, 3>(tiling, promotion, vectorization,
847                                               tileSizes);
848 
849   populateVectorizationPatterns<ConvNDHWCOp, 5>(tiling, promotion,
850                                                 vectorization, tileSizes);
851   populateVectorizationPatterns<ConvInputNDHWCFilterDHWCFOp, 5>(
852       tiling, promotion, vectorization, tileSizes);
853 
854   populateVectorizationPatterns<ConvNCDHWOp, 5>(tiling, promotion,
855                                                 vectorization, tileSizes);
856   populateVectorizationPatterns<ConvInputNCDHWFilterDHWCFOp, 5>(
857       tiling, promotion, vectorization, tileSizes);
858 
859   patterns.push_back(std::move(tiling));
860   patterns.push_back(std::move(promotion));
861   patterns.push_back(std::move(vectorization));
862 }
863 
864 //----------------------------------------------------------------------------//
865 // Forwarding patterns
866 //----------------------------------------------------------------------------//
867 
868 /// Check whether there is any interleaved use of any `values` between `firstOp`
869 /// and `secondOp`. Conservatively return `true` if any op or value is in a
870 /// different block.
871 static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp,
872                                     ValueRange values) {
873   if (firstOp->getBlock() != secondOp->getBlock() ||
874       !firstOp->isBeforeInBlock(secondOp)) {
875     LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
876                             << "interleavedUses precondition failed, firstOp: "
877                             << *firstOp << ", second op: " << *secondOp);
878     return true;
879   }
880   for (auto v : values) {
881     for (auto &u : v.getUses()) {
882       Operation *owner = u.getOwner();
883       if (owner == firstOp || owner == secondOp)
884         continue;
885       // TODO: this is too conservative, use dominance info in the future.
886       if (owner->getBlock() == firstOp->getBlock() &&
887           (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner)))
888         continue;
889       LLVM_DEBUG(llvm::dbgs()
890                  << "\n[" DEBUG_TYPE "]: "
891                  << " found interleaved op " << *owner
892                  << ", firstOp: " << *firstOp << ", second op: " << *secondOp);
893       return true;
894     }
895   }
896   return false;
897 }
898 
899 /// Return the unique subview use of `v` if it is indeed unique, null otherwise.
900 static memref::SubViewOp getSubViewUseIfUnique(Value v) {
901   memref::SubViewOp subViewOp;
902   for (auto &u : v.getUses()) {
903     if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) {
904       if (subViewOp)
905         return memref::SubViewOp();
906       subViewOp = newSubViewOp;
907     }
908   }
909   return subViewOp;
910 }
911 
912 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
913 /// when available.
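///
/// Schematic example of the rewrite: a vector.transfer_read from a subview of
/// a local buffer that a linalg.copy previously filled from `%src`:
///
/// ```
///   %sv = memref.subview %alloc [...]
///   linalg.copy(%src, %sv)
///   %v = vector.transfer_read %sv[...], %cst
/// ```
///
/// is forwarded to a vector.transfer_read directly from `%src`; the copy (and
/// a preceding linalg.fill of `%alloc`, if any) is erased.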
914 LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
915     vector::TransferReadOp xferOp, PatternRewriter &rewriter) const {
916 
917   // Transfer into `view`.
918   Value viewOrAlloc = xferOp.source();
919   if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
920       !viewOrAlloc.getDefiningOp<memref::AllocOp>())
921     return failure();
922 
923   LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: " << viewOrAlloc);
924 
925   // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
926   memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
927   if (!subViewOp)
928     return failure();
929   Value subView = subViewOp.getResult();
930   LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
931                           << "with subView " << subView);
932 
933   // Find the copy into `subView` without interleaved uses.
934   CopyOp copyOp;
935   for (auto &u : subView.getUses()) {
936     if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
937       assert(newCopyOp.output().getType().isa<MemRefType>());
938       if (newCopyOp.output() != subView)
939         continue;
940       LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
941                               << "copy candidate " << *newCopyOp);
942       if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView}))
943         continue;
944       copyOp = newCopyOp;
945       break;
946     }
947   }
948   if (!copyOp)
949     return failure();
950   LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
951                           << "with copy " << *copyOp);
952 
953   // Find the fill into `viewOrAlloc` without interleaved uses before the copy.
954   FillOp maybeFillOp;
955   for (auto &u : viewOrAlloc.getUses()) {
956     if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) {
957       assert(newFillOp.output().getType().isa<MemRefType>());
958       if (newFillOp.output() != viewOrAlloc)
959         continue;
960       LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
961                               << "fill candidate " << *newFillOp);
962       if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView}))
963         continue;
964       maybeFillOp = newFillOp;
965       break;
966     }
967   }
968   // Ensure padding matches.
969   if (maybeFillOp && xferOp.padding() != maybeFillOp.value())
970     return failure();
971   if (maybeFillOp)
972     LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
973                             << "with maybeFillOp " << *maybeFillOp);
974 
  // `in` is the value that linalg.copy reads from; forward the transfer_read to it.
976   Value in = copyOp.input();
977 
978   // linalg.copy + linalg.fill can be used to create a padded local buffer.
979   // The `masked` attribute is only valid on this padded buffer.
980   // When forwarding to vector.transfer_read, the attribute must be reset
981   // conservatively.
982   Value res = rewriter.create<vector::TransferReadOp>(
983       xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(),
984       xferOp.permutation_map(), xferOp.padding(), ArrayAttr());
985 
986   if (maybeFillOp)
987     rewriter.eraseOp(maybeFillOp);
988   rewriter.eraseOp(copyOp);
989   rewriter.replaceOp(xferOp, res);
990 
991   return success();
992 }
993 
994 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
995 /// when available.
996 LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
997     vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const {
998   // Transfer into `viewOrAlloc`.
999   Value viewOrAlloc = xferOp.source();
1000   if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
1001       !viewOrAlloc.getDefiningOp<memref::AllocOp>())
1002     return failure();
1003 
1004   // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
1005   memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
1006   if (!subViewOp)
1007     return failure();
1008   Value subView = subViewOp.getResult();
1009 
1010   // Find the copy from `subView` without interleaved uses.
1011   CopyOp copyOp;
1012   for (auto &u : subViewOp.getResult().getUses()) {
1013     if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
1014       if (newCopyOp.getInputOperand(0)->get() != subView)
1015         continue;
1016       if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView}))
1017         continue;
1018       copyOp = newCopyOp;
1019       break;
1020     }
1021   }
1022   if (!copyOp)
1023     return failure();
1024 
  // `out` is the buffer that linalg.copy writes into; forward the transfer_write to it.
1026   assert(copyOp.output().getType().isa<MemRefType>());
1027   Value out = copyOp.output();
1028 
1029   // Forward vector.transfer into copy.
1030   // linalg.copy + linalg.fill can be used to create a padded local buffer.
1031   // The `masked` attribute is only valid on this padded buffer.
1032   // When forwarding to vector.transfer_write, the attribute must be reset
1033   // conservatively.
1034   rewriter.create<vector::TransferWriteOp>(
1035       xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(),
1036       xferOp.permutation_map(), ArrayAttr());
1037 
1038   rewriter.eraseOp(copyOp);
1039   rewriter.eraseOp(xferOp);
1040 
1041   return success();
1042 }
1043