//===- Vectorization.cpp - Implementation of linalg Vectorization ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Vectorization transformations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>

using namespace mlir;
using namespace mlir::linalg;

#define DEBUG_TYPE "linalg-vectorization"

#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << X)

/// Try to vectorize `convOp` as a convolution.
static FailureOr<Operation *> vectorizeConvolution(OpBuilder &b,
                                                   LinalgOp convOp);

/// Return the unique instance of OpType in `block` if it is indeed unique.
/// Return null if zero or more than one instance exists.
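/// E.g. `getSingleOpOfType<linalg::YieldOp>(block)` returns the terminator
/// only when it is the sole YieldOp in `block` (illustrative usage).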
template <typename OpType>
static OpType getSingleOpOfType(Block &block) {
  OpType res;
  block.walk([&](OpType op) {
    if (res) {
      res = nullptr;
      return WalkResult::interrupt();
    }
    res = op;
    return WalkResult::advance();
  });
  return res;
}

/// Given an indexing `map` coming from a LinalgOp indexing, restricted to a
/// projected permutation, compress the unused dimensions to serve as a
/// permutation_map for a vector transfer operation.
/// For example, given a linalg op such as:
///
/// ```
///   %0 = linalg.generic {
///        indexing_maps = [affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>,
///                         affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)>]
///      }
///     ins(%0 : tensor<2x3x4xf32>)
///    outs(%1 : tensor<5x6xf32>)
/// ```
///
/// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine
/// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second
/// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`.
static AffineMap reindexIndexingMap(AffineMap map) {
  assert(map.isProjectedPermutation(/*allowZeroInResults=*/true) &&
         "expected projected permutation");
  auto res = compressUnusedDims(map);
  assert(res.getNumDims() == res.getNumResults() &&
         "expected reindexed map with same number of dims and results");
  return res;
}

/// Helper data structure to represent the result of vectorization.
/// In certain specific cases, like terminators, we do not want to propagate
/// replacements automatically and instead let a custom hook handle them.
enum VectorizationStatus {
  /// Op failed to vectorize.
  Failure = 0,
  /// Op vectorized and custom function took care of replacement logic.
  NoReplace,
  /// Op vectorized into a new Op whose results will replace original Op's
  /// results.
  NewOp
  // TODO: support values if Op vectorized to Many-Ops whose results we need to
  // aggregate for replacement.
};
struct VectorizationResult {
  /// Return status from vectorizing the current op.
  enum VectorizationStatus status = VectorizationStatus::Failure;
  /// New vectorized operation to replace the current op.
  /// Replacement behavior is specified by `status`.
  Operation *newOp;
};

llvm::Optional<vector::CombiningKind>
mlir::linalg::getCombinerOpKind(Operation *combinerOp) {
  using ::mlir::vector::CombiningKind;

  if (!combinerOp)
    return llvm::None;
  return llvm::TypeSwitch<Operation *, llvm::Optional<CombiningKind>>(
             combinerOp)
      .Case<arith::AddIOp, arith::AddFOp>(
          [&](auto op) { return CombiningKind::ADD; })
      .Case<arith::AndIOp>([&](auto op) { return CombiningKind::AND; })
      .Case<arith::MaxSIOp>([&](auto op) { return CombiningKind::MAXSI; })
      .Case<arith::MaxFOp>([&](auto op) { return CombiningKind::MAXF; })
      .Case<arith::MinSIOp>([&](auto op) { return CombiningKind::MINSI; })
      .Case<arith::MinFOp>([&](auto op) { return CombiningKind::MINF; })
      .Case<arith::MulIOp, arith::MulFOp>(
          [&](auto op) { return CombiningKind::MUL; })
      .Case<arith::OrIOp>([&](auto op) { return CombiningKind::OR; })
      .Case<arith::XOrIOp>([&](auto op) { return CombiningKind::XOR; })
      .Default([&](auto op) { return llvm::None; });
}

/// Check whether `outputOperand` is a reduction with a single combiner
/// operation. Return the combiner operation of the reduction, or nullptr
/// otherwise. Multiple reduction operations would impose an ordering between
/// reduction dimensions and are currently unsupported in Linalg. This
/// limitation is motivated by the fact that e.g. min(max(X)) !=
/// max(min(X)).
// TODO: use in LinalgOp verification, there is a circular dependency atm.
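/// An illustrative sketch of a matching region (names are hypothetical):
///
/// ```
///   ^bb0(%in: f32, %acc: f32):
///     %0 = arith.addf %in, %acc : f32
///     linalg.yield %0 : f32
/// ```
///
/// Here `arith.addf` is the single combiner returned for the output operand.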
static Operation *matchLinalgReduction(OpOperand *outputOperand) {
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  unsigned outputPos =
      outputOperand->getOperandNumber() - linalgOp.getNumInputs();
  // Only single combiner operations are supported for now.
  SmallVector<Operation *, 4> combinerOps;
  if (!matchReduction(linalgOp.getRegionOutputArgs(), outputPos, combinerOps) ||
      combinerOps.size() != 1)
    return nullptr;

  // Return the combiner operation.
  return combinerOps[0];
}

/// Broadcast `value` to a vector of `shape` if possible. Return `value`
/// otherwise.
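/// E.g. broadcasting a scalar `%cst : f32` to `shape = {4, 8}` produces,
/// roughly, `vector.broadcast %cst : f32 to vector<4x8xf32>` (illustrative).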
static Value broadcastIfNeeded(OpBuilder &b, Value value,
                               ArrayRef<int64_t> shape) {
  // If no shape to broadcast to, just return `value`.
  if (shape.empty())
    return value;
  VectorType targetVectorType =
      VectorType::get(shape, getElementTypeOrSelf(value));
  if (vector::isBroadcastableTo(value.getType(), targetVectorType) !=
      vector::BroadcastableToResult::Success)
    return value;
  Location loc = b.getInsertionPoint()->getLoc();
  return b.createOrFold<vector::BroadcastOp>(loc, targetVectorType, value);
}

/// Create a MultiDimReductionOp to compute the reduction for `reduceOp`. This
/// assumes that `reduceOp` has two operands and one of them is the reduction
/// initial value.
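/// E.g. with an addition combiner and `reductionMask = {false, true}`, this
/// emits, roughly (illustrative; exact assembly may vary):
///
/// ```
///   %r = vector.multi_reduction <add>, %v [1]
///       : vector<4x8xf32> to vector<4xf32>
/// ```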
static Value buildMultiDimReduce(OpBuilder &b, Operation *reduceOp,
                                 Value valueToReduce,
                                 const SmallVector<bool> &reductionMask) {
  auto maybeKind = getCombinerOpKind(reduceOp);
  assert(maybeKind && "Failed precondition: could not get reduction kind");
  return b.create<vector::MultiDimReductionOp>(
      reduceOp->getLoc(), valueToReduce, reductionMask, *maybeKind);
}

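/// Compute the per-dimension reduction mask of `linalgOp`. E.g. for
/// iterator_types `["parallel", "reduction", "parallel"]` this returns
/// `{false, true, false}`.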
static SmallVector<bool> getReductionMask(LinalgOp linalgOp) {
  unsigned idx = 0;
  SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false);
  for (auto attr : linalgOp.iterator_types()) {
    if (isReductionIterator(attr))
      reductionMask[idx] = true;
    ++idx;
  }
  return reductionMask;
}

/// Build a vector.transfer_write of `value` into `outputOperand` at indices
/// set to all `0`, where `outputOperand` is an output operand of the LinalgOp
/// currently being vectorized. If the output has rank 0, build a 0-d
/// vector.transfer_write. Return the produced value or null if no value is
/// produced.
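/// An illustrative sketch for a rank-2 output with an identity indexing map:
///
/// ```
///   %w = vector.transfer_write %v, %out[%c0, %c0]
///       : vector<4x8xf32>, tensor<4x8xf32>
/// ```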
static Value buildVectorWrite(OpBuilder &b, Value value,
                              OpOperand *outputOperand) {
  Operation *write;
  Location loc = value.getLoc();
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  ArrayRef<int64_t> shape = linalgOp.getShape(outputOperand);
  auto vectorType = VectorType::get(
      shape, getElementTypeOrSelf(outputOperand->get().getType()));
  if (vectorType.getRank() > 0) {
    // 0-d case is still special: do not invert the reindexing map.
    AffineMap map =
        reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand));
    SmallVector<int64_t> transposeShape =
        applyPermutationMap(inversePermutation(map), vectorType.getShape());
    assert(!transposeShape.empty() && "unexpected empty transpose shape");
    vectorType = VectorType::get(transposeShape, vectorType.getElementType());
    SmallVector<Value> indices(linalgOp.getRank(outputOperand),
                               b.create<arith::ConstantIndexOp>(loc, 0));
    value = broadcastIfNeeded(b, value, vectorType.getShape());
    write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
                                              indices, map);
  } else {
    if (!value.getType().isa<VectorType>())
      value = b.create<vector::BroadcastOp>(loc, vectorType, value);
    assert(value.getType() == vectorType && "incorrect type");
    write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
                                              ValueRange{});
  }
  LDBG("vectorized op: " << *write);
  if (!write->getResults().empty())
    return write->getResult(0);
  return Value();
}

// Custom vectorization function type. Produce a vector form of Operation*
// assuming all its vectorized operands are already in the BlockAndValueMapping.
// Return nullptr if the Operation cannot be vectorized.
using CustomVectorizationHook = std::function<VectorizationResult(
    Operation *, const BlockAndValueMapping &)>;

/// Helper function to vectorize the terminator of a `linalgOp`. New result
/// vector values are appended to `newResults`. Return
/// VectorizationStatus::NoReplace to signal the vectorization algorithm that it
/// should not try to map produced operations and instead return the results
/// via the `newResults` vector, making them available to the vectorization
/// algorithm for RAUW. This function is meant to be used as a
/// CustomVectorizationHook.
static VectorizationResult
vectorizeLinalgYield(OpBuilder &b, Operation *op,
                     const BlockAndValueMapping &bvm, LinalgOp linalgOp,
                     SmallVectorImpl<Value> &newResults) {
  auto yieldOp = dyn_cast<linalg::YieldOp>(op);
  if (!yieldOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  for (const auto &outputs : llvm::enumerate(yieldOp.values())) {
    // TODO: Scan for an opportunity for reuse.
    // TODO: use a map.
    Value vectorValue = bvm.lookup(outputs.value());
    Value newResult = buildVectorWrite(
        b, vectorValue, linalgOp.getOutputOperand(outputs.index()));
    if (newResult)
      newResults.push_back(newResult);
  }
  return VectorizationResult{VectorizationStatus::NoReplace, nullptr};
}

/// Helper function to vectorize the index operations of a `linalgOp`. Return
/// VectorizationStatus::NewOp to signal the vectorization algorithm that it
/// should map the produced operations. This function is meant to be used as a
/// CustomVectorizationHook.
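/// E.g. in a 4x8 iteration space, `linalg.index 1` (the trailing dimension)
/// vectorizes to a single `arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> :
/// vector<8xindex>`, while `linalg.index 0` yields `dense<[0, 1, 2, 3]>`
/// broadcast to `vector<8x4xindex>` and then transposed to `vector<4x8xindex>`.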
static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
                                                LinalgOp linalgOp) {
  IndexOp indexOp = dyn_cast<linalg::IndexOp>(op);
  if (!indexOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  auto loc = indexOp.getLoc();
  // Compute the static loop sizes of the index op.
  auto targetShape = linalgOp.computeStaticLoopSizes();
  // Compute a one-dimensional index vector for the index op dimension.
  SmallVector<int64_t> constantSeq =
      llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.dim()]));
  auto constantOp =
      b.create<arith::ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
  // Return the one-dimensional index vector if it lives in the trailing
  // dimension of the iteration space since the vectorization algorithm in this
  // case can handle the broadcast.
  if (indexOp.dim() == targetShape.size() - 1)
    return VectorizationResult{VectorizationStatus::NewOp, constantOp};
  // Otherwise permute the targetShape to move the index dimension last,
  // broadcast the one-dimensional index vector to the permuted shape, and
  // finally transpose the broadcasted index vector to undo the permutation.
  std::swap(targetShape[indexOp.dim()], targetShape.back());
  auto broadCastOp = b.create<vector::BroadcastOp>(
      loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
  SmallVector<int64_t> transposition =
      llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
  std::swap(transposition.back(), transposition[indexOp.dim()]);
  auto transposeOp =
      b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
  return VectorizationResult{VectorizationStatus::NewOp, transposeOp};
}

/// Emit reduction operations if the shape of the value to reduce differs from
/// the result shape.
static Operation *reduceIfNeeded(OpBuilder &b, LinalgOp linalgOp, Operation *op,
                                 Value reduceValue, Value initialValue,
                                 const BlockAndValueMapping &bvm) {
  Value reduceVec = bvm.lookup(reduceValue);
  Value outputVec = bvm.lookup(initialValue);
  auto reduceType = reduceVec.getType().dyn_cast<VectorType>();
  auto outputType = outputVec.getType().dyn_cast<VectorType>();
  // Reduce only if needed as the value may already have been reduced by
  // contraction vectorization.
  if (!reduceType ||
      (outputType && reduceType.getShape() == outputType.getShape()))
    return nullptr;
  SmallVector<bool> reductionMask = getReductionMask(linalgOp);
  Value reduce = buildMultiDimReduce(b, op, reduceVec, reductionMask);
  return b.create(op->getLoc(), op->getName().getIdentifier(),
                  /*operands=*/{reduce, outputVec}, reduce.getType(),
                  op->getAttrs());
}

/// Generic vectorization for a single operation `op`, given already vectorized
/// operands carried by `bvm`. Vectorization occurs as follows:
///   1. Try to apply any of the `customVectorizationHooks` and return its
///   result on success.
///   2. Clone any constant in the current scope without vectorization: each
///   consumer of the constant will later determine the shape to which the
///   constant needs to be broadcast.
///   3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose
///   of the `customVectorizationHooks` to cover such cases.
///   4. Clone `op` in vector form to a vector of shape prescribed by the first
///   operand of maximal rank. Other operands have smaller rank and are
///   broadcast accordingly. It is assumed this broadcast is always legal,
///   otherwise, it means one of the `customVectorizationHooks` is incorrect.
///
/// This function assumes all operands of `op` have been vectorized and are in
/// the `bvm` mapping. As a consequence, this function is meant to be called on
/// a topologically-sorted list of ops.
/// This function does not update `bvm` but returns a VectorizationStatus that
/// instructs the caller what `bvm` update needs to occur.
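/// An illustrative sketch of step 4: once both operands are mapped to
/// `vector<4x8xf32>` values, a scalar `arith.addf` in the body is cloned as,
/// roughly, `%r = arith.addf %a, %b : vector<4x8xf32>`.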
static VectorizationResult
vectorizeOneOp(OpBuilder &b, LinalgOp linalgOp, Operation *op,
               const BlockAndValueMapping &bvm,
               ArrayRef<CustomVectorizationHook> customVectorizationHooks) {
  LDBG("vectorize op " << *op);

  // 1. Try to apply any CustomVectorizationHook.
  if (!customVectorizationHooks.empty()) {
    for (auto &customFunc : customVectorizationHooks) {
      VectorizationResult result = customFunc(op, bvm);
      if (result.status == VectorizationStatus::Failure)
        continue;
      return result;
    }
  }

  // 2. Constant ops don't get vectorized but rather broadcast at their users.
  // Clone so that the constant is not confined to the linalgOp block.
  if (isa<arith::ConstantOp, func::ConstantOp>(op))
    return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)};

  // 3. Only ElementwiseMappable are allowed in the generic vectorization.
  if (!OpTrait::hasElementwiseMappableTraits(op))
    return VectorizationResult{VectorizationStatus::Failure, nullptr};

  // 4. Check if the operation is a reduction.
  SmallVector<std::pair<Value, Value>> reductionOperands;
  for (Value operand : op->getOperands()) {
    auto arg = operand.dyn_cast<BlockArgument>();
    if (!arg || arg.getArgNumber() < linalgOp.getNumInputs())
      continue;
    SmallVector<Operation *> reductionOps;
    Value reduceValue = matchReduction(
        linalgOp.getRegionOutputArgs(),
        arg.getArgNumber() - linalgOp.getNumInputs(), reductionOps);
    if (!reduceValue)
      continue;
    reductionOperands.push_back(std::make_pair(reduceValue, operand));
  }
  if (!reductionOperands.empty()) {
    assert(reductionOperands.size() == 1);
    Operation *reduceOp =
        reduceIfNeeded(b, linalgOp, op, reductionOperands[0].first,
                       reductionOperands[0].second, bvm);
    if (reduceOp)
      return VectorizationResult{VectorizationStatus::NewOp, reduceOp};
  }

  // 5. Generic vectorization path for ElementwiseMappable ops.
  //   a. first get the first max ranked shape.
  SmallVector<int64_t, 4> firstMaxRankedShape;
  for (Value operand : op->getOperands()) {
    auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>();
    if (vt && firstMaxRankedShape.size() < vt.getShape().size())
      firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end());
  }
  //   b. broadcast each op if needed.
  auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) {
    return firstMaxRankedShape.empty()
               ? bvm.lookup(v)
               : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape);
  });
  //   c. for elementwise, the result is the vector with the firstMaxRankedShape
  auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) {
    return firstMaxRankedShape.empty()
               ? t
               : VectorType::get(firstMaxRankedShape, t);
  });

  // Build and return the new op.
  return VectorizationResult{
      VectorizationStatus::NewOp,
      b.create(op->getLoc(), op->getName().getIdentifier(),
               llvm::to_vector<4>(vectorizedOperands),
               llvm::to_vector<4>(returnTypes), op->getAttrs())};
}

/// Generic vectorization function that rewrites the body of a `linalgOp` into
/// vector form. Generic vectorization proceeds as follows:
///   1. Verify the `linalgOp` has one non-empty region.
///   2. Values defined above the region are mapped to themselves and will be
///   broadcast on a per-need basis by their consumers.
///   3. Each region argument is vectorized into a vector.transfer_read (or 0-d
///   load).
///   TODO: Reuse opportunities for RAR dependencies.
///   4a. Register CustomVectorizationHook for YieldOp to capture the results.
///   4b. Register CustomVectorizationHook for IndexOp to access the iteration
///   indices.
///   5. Iteratively call vectorizeOneOp on the region operations.
/// Eager broadcasting is performed to the maximal common vector size implied
/// by the `linalgOp` iteration space. This eager broadcasting is introduced in
/// the permutation_map of the vector.transfer_read operations. The eager
/// broadcasting makes it trivial to determine where broadcasts, transposes and
/// reductions should occur, without any bookkeeping. The tradeoff is that, in
/// the absence of good canonicalizations, the amount of work increases.
/// This is not deemed a problem as we expect canonicalizations and foldings to
/// aggressively clean up the useless work.
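/// An illustrative sketch on a static elementwise addition: each shaped
/// argument becomes a vector.transfer_read, the body is rewritten on vectors,
/// and the yielded value becomes a vector.transfer_write (permutation maps
/// omitted for brevity):
///
/// ```
///   %lhs = vector.transfer_read %arg0[%c0, %c0], %cst
///       : tensor<4x8xf32>, vector<4x8xf32>
///   %rhs = vector.transfer_read %arg1[%c0, %c0], %cst
///       : tensor<4x8xf32>, vector<4x8xf32>
///   %add = arith.addf %lhs, %rhs : vector<4x8xf32>
///   %res = vector.transfer_write %add, %arg2[%c0, %c0]
///       : vector<4x8xf32>, tensor<4x8xf32>
/// ```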
static LogicalResult
vectorizeAsLinalgGeneric(OpBuilder &b, LinalgOp linalgOp,
                         SmallVectorImpl<Value> &newResults) {
  Block *block = linalgOp.getBlock();

  // 2. Values defined above the region can only be broadcast for now. Make them
  // map to themselves.
  BlockAndValueMapping bvm;
  SetVector<Value> valuesSet;
  mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
  bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef());

  if (linalgOp.getNumOutputs() == 0)
    return failure();

  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes();

  // 3. Turn all BBArgs into vector.transfer_read / load.
  Location loc = linalgOp.getLoc();
  Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    BlockArgument bbarg = block->getArgument(opOperand->getOperandNumber());
    if (linalgOp.isScalar(opOperand)) {
      bvm.map(bbarg, opOperand->get());
      continue;
    }
    VectorType readType;
    AffineMap map;
    // TODO: can we keep this simplification?
    // if (linalgOp.getShape(opOperand).empty()) {
    //   readType = VectorType::get({}, bbarg.getType());
    // } else {
    if (opOperand->getOperandNumber() < linalgOp.getNumInputs()) {
      map = inverseAndBroadcastProjectedPermutation(
          linalgOp.getTiedIndexingMap(opOperand));
      readType = VectorType::get(commonVectorShape,
                                 getElementTypeOrSelf(opOperand->get()));
    } else {
      map = inversePermutation(
          reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand)));
      readType = VectorType::get(map.compose(linalgOp.getShape(opOperand)),
                                 getElementTypeOrSelf(opOperand->get()));
    }
    // }

    auto shape = linalgOp.getShape(opOperand);
    SmallVector<Value> indices(shape.size(), zero);
    Value readValue = b.create<vector::TransferReadOp>(
        loc, readType, opOperand->get(), indices, map);
    // Not all ops support 0-d vectors, extract the scalar for now.
    // TODO: remove this.
    if (readValue.getType().cast<VectorType>().getRank() == 0)
      readValue = b.create<vector::ExtractElementOp>(loc, readValue);

    LDBG("new vectorized bbarg(" << bbarg.getArgNumber() << "): " << readValue);
    bvm.map(bbarg, readValue);
    bvm.map(opOperand->get(), readValue);
  }

  SmallVector<CustomVectorizationHook> hooks;
  // 4a. Register CustomVectorizationHook for yieldOp.
  CustomVectorizationHook vectorizeYield =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults);
  };
  hooks.push_back(vectorizeYield);

  // 4b. Register CustomVectorizationHook for indexOp.
  CustomVectorizationHook vectorizeIndex =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgIndex(b, op, linalgOp);
  };
  hooks.push_back(vectorizeIndex);

  // 5. Iteratively call `vectorizeOneOp` on each op in the block.
  for (Operation &op : block->getOperations()) {
    VectorizationResult result = vectorizeOneOp(b, linalgOp, &op, bvm, hooks);
    if (result.status == VectorizationStatus::Failure) {
      LDBG("failed to vectorize: " << op);
      return failure();
    }
    if (result.status == VectorizationStatus::NewOp) {
      LDBG("new vector op: " << *result.newOp);
      bvm.map(op.getResults(), result.newOp->getResults());
    }
  }

  return success();
}

// TODO: probably need some extra checks for reduction followed by consumer
// ops that may not commute (e.g. linear reduction + non-linear instructions).
static LogicalResult reductionPreconditions(LinalgOp op) {
  if (llvm::none_of(op.iterator_types(), isReductionIterator)) {
    LDBG("reduction precondition failed: no reduction iterator");
    return failure();
  }
  for (OpOperand *opOperand : op.getOutputOperands()) {
    Operation *reduceOp = matchLinalgReduction(opOperand);
    if (!reduceOp || !getCombinerOpKind(reduceOp)) {
      LDBG("reduction precondition failed: reduction detection failed");
      return failure();
    }
  }
  return success();
}

static LogicalResult vectorizeStaticLinalgOpPrecondition(linalg::LinalgOp op) {
  // All types in the body should be a supported element type for VectorType.
  for (Operation &innerOp : op->getRegion(0).front()) {
    if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
          return !VectorType::isValidElementType(type);
        })) {
      return failure();
    }
    if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
          return !VectorType::isValidElementType(type);
        })) {
      return failure();
    }
  }
  if (isElementwise(op))
    return success();
  // TODO: isaConvolutionOpInterface that can also infer from generic features.
  // But we will still need stride/dilation attributes that will be annoying to
  // reverse-engineer...
  if (isa<ConvolutionOpInterface>(op.getOperation()))
    return success();
  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  if (!allIndexingsAreProjectedPermutation(op)) {
    LDBG("precondition failed: not projected permutations");
    return failure();
  }
  if (failed(reductionPreconditions(op))) {
    LDBG("precondition failed: reduction preconditions");
    return failure();
  }
  return success();
}

static LogicalResult vectorizeLinalgOpPrecondition(LinalgOp linalgOp) {
  // All types must be static shape to go to vector.
  if (linalgOp.hasDynamicShape()) {
    LDBG("precondition failed: dynamic shape");
    return failure();
  }
  return vectorizeStaticLinalgOpPrecondition(linalgOp);
}

LogicalResult mlir::linalg::vectorize(RewriterBase &rewriter,
                                      LinalgOp linalgOp) {
  if (failed(vectorizeLinalgOpPrecondition(linalgOp)))
    return failure();

  SmallVector<Value> results;
  // TODO: isaConvolutionOpInterface that can also infer from generic
  // features. Will require stride/dilation attributes inference.
  FailureOr<Operation *> convOr = vectorizeConvolution(rewriter, linalgOp);
  if (succeeded(convOr)) {
    llvm::append_range(results, (*convOr)->getResults());
  } else {
    LDBG("Vectorize generic by broadcasting to a common shape: " << linalgOp);
    if (failed(vectorizeAsLinalgGeneric(rewriter, linalgOp, results)))
      return failure();
  }

  if (!results.empty())
    rewriter.replaceOp(linalgOp, results);
  else
    rewriter.eraseOp(linalgOp);

  return success();
}

LogicalResult mlir::linalg::vectorizeCopy(RewriterBase &rewriter,
                                          memref::CopyOp copyOp) {

  auto srcType = copyOp.source().getType().cast<MemRefType>();
  auto dstType = copyOp.target().getType().cast<MemRefType>();
  if (!srcType.hasStaticShape() || !dstType.hasStaticShape())
    return failure();

  auto readType =
      VectorType::get(srcType.getShape(), getElementTypeOrSelf(srcType));
  auto writeType =
      VectorType::get(dstType.getShape(), getElementTypeOrSelf(dstType));

  Location loc = copyOp->getLoc();
  Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
  SmallVector<Value> indices(srcType.getRank(), zero);

  Value readValue = rewriter.create<vector::TransferReadOp>(
      loc, readType, copyOp.source(), indices,
      rewriter.getMultiDimIdentityMap(srcType.getRank()));
  if (readValue.getType().cast<VectorType>().getRank() == 0) {
    readValue = rewriter.create<vector::ExtractElementOp>(loc, readValue);
    readValue = rewriter.create<vector::BroadcastOp>(loc, writeType, readValue);
  }
  Operation *writeValue = rewriter.create<vector::TransferWriteOp>(
      loc, readValue, copyOp.target(), indices,
      rewriter.getMultiDimIdentityMap(srcType.getRank()));
  rewriter.replaceOp(copyOp, writeValue->getResults());
  return success();
}

//----------------------------------------------------------------------------//
// Misc. vectorization patterns.
//----------------------------------------------------------------------------//

/// Helper function that retrieves the value of an IntegerAttr.
static int64_t getIntFromAttr(Attribute attr) {
  return attr.cast<IntegerAttr>().getInt();
}

/// Given an ArrayRef of OpFoldResults, return a vector of Values.
/// IntegerAttrs are converted to ConstantIndexOps. Other attribute types are
/// not supported.
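/// E.g. `{%v, IntegerAttr(42)}` becomes `{%v, %c42}` where `%c42` is a newly
/// created `arith.constant 42 : index` (illustrative).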
static SmallVector<Value> ofrToIndexValues(OpBuilder &builder, Location loc,
                                           ArrayRef<OpFoldResult> ofrs) {
  SmallVector<Value> result;
  llvm::for_each(ofrs, [&](auto o) {
    if (auto val = o.template dyn_cast<Value>()) {
      result.push_back(val);
    } else {
      result.push_back(builder.create<arith::ConstantIndexOp>(
          loc, getIntFromAttr(o.template get<Attribute>())));
    }
  });
  return result;
}

/// Rewrite a tensor::PadOp into a sequence of InitTensorOp, FillOp and
/// InsertSliceOp. For now, only constant padding values are supported.
/// If there is enough static type information, TransferReadOps and
/// TransferWriteOps may be generated instead of InsertSliceOps.
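/// An illustrative sketch of the vectorized path for a static
/// `tensor<4x3xf32>` source padded to `tensor<17x5xf32>` with low padding
/// `[2, 1]` (values are hypothetical):
///
/// ```
///   %read = vector.transfer_read %src[%c0, %c0], %pad
///       {in_bounds = [true, true]} : tensor<4x3xf32>, vector<4x3xf32>
///   %r = vector.transfer_write %read, %dest[%c2, %c1]
///       {in_bounds = [true, true]} : vector<4x3xf32>, tensor<17x5xf32>
/// ```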
struct GenericPadOpVectorizationPattern : public GeneralizePadOpPattern {
  GenericPadOpVectorizationPattern(MLIRContext *context,
                                   PatternBenefit benefit = 1)
      : GeneralizePadOpPattern(context, tryVectorizeCopy, benefit) {}
  /// Vectorize the copying of a tensor::PadOp's source. This is possible if
  /// each dimension size is statically known in the source type or the result
  /// type (or both).
  static LogicalResult tryVectorizeCopy(PatternRewriter &rewriter,
                                        tensor::PadOp padOp, Value dest) {
    auto sourceType = padOp.getSourceType();
    auto resultType = padOp.getResultType();

    // Copy cannot be vectorized if pad value is non-constant and source shape
    // is dynamic. In case of a dynamic source shape, padding must be appended
    // by TransferReadOp, but TransferReadOp supports only constant padding.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue) {
      if (!sourceType.hasStaticShape())
        return failure();
      // Create dummy padding value.
      auto elemType = sourceType.getElementType();
      padValue = rewriter.create<arith::ConstantOp>(
          padOp.getLoc(), elemType, rewriter.getZeroAttr(elemType));
    }

    SmallVector<int64_t> vecShape;
    SmallVector<bool> readInBounds;
    SmallVector<bool> writeInBounds;
    for (unsigned i = 0; i < sourceType.getRank(); ++i) {
      if (!sourceType.isDynamicDim(i)) {
        vecShape.push_back(sourceType.getDimSize(i));
        // Source shape is statically known: neither read nor write is
        // out-of-bounds.
        readInBounds.push_back(true);
        writeInBounds.push_back(true);
      } else if (!resultType.isDynamicDim(i)) {
        // Source shape is not statically known, but result shape is.
        // Vectorize with size of result shape. This may be larger than the
        // source size.
        vecShape.push_back(resultType.getDimSize(i));
        // Read may be out-of-bounds because the result size could be larger
        // than the source size.
        readInBounds.push_back(false);
        // Write is out-of-bounds if low padding > 0.
        writeInBounds.push_back(
            getConstantIntValue(padOp.getMixedLowPad()[i]) ==
            static_cast<int64_t>(0));
      } else {
        // Neither source nor result dim of padOp is static. Cannot vectorize
        // the copy.
        return failure();
      }
    }
    auto vecType = VectorType::get(vecShape, sourceType.getElementType());

    // Generate TransferReadOp.
    SmallVector<Value> readIndices(
        vecType.getRank(),
        rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
    auto read = rewriter.create<vector::TransferReadOp>(
        padOp.getLoc(), vecType, padOp.getSource(), readIndices, padValue,
        ArrayRef<bool>{readInBounds});

    // If `dest` is a FillOp and the TransferWriteOp would overwrite the
    // entire tensor, write directly to the FillOp's operand.
    if (llvm::equal(vecShape, resultType.getShape()) &&
        llvm::all_of(writeInBounds, [](bool b) { return b; }))
      if (auto fill = dest.getDefiningOp<FillOp>())
        dest = fill.output();

    // Generate TransferWriteOp.
    auto writeIndices =
        ofrToIndexValues(rewriter, padOp.getLoc(), padOp.getMixedLowPad());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        padOp, read, dest, writeIndices, ArrayRef<bool>{writeInBounds});

    return success();
  }
};

/// Base pattern for rewriting tensor::PadOps whose result is consumed by a
/// given operation type OpTy.
template <typename OpTy>
struct VectorizePadOpUserPattern : public OpRewritePattern<tensor::PadOp> {
  using OpRewritePattern<tensor::PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::PadOp padOp,
                                PatternRewriter &rewriter) const final {
    bool changed = false;
    // Insert users in vector, because some users may be replaced/removed.
    for (auto *user : llvm::to_vector<4>(padOp->getUsers()))
      if (auto op = dyn_cast<OpTy>(user))
        changed |= rewriteUser(rewriter, padOp, op).succeeded();
    return success(changed);
  }

protected:
  virtual LogicalResult rewriteUser(PatternRewriter &rewriter,
                                    tensor::PadOp padOp, OpTy op) const = 0;
};

/// Rewrite use of tensor::PadOp result in TransferReadOp. E.g.:
/// ```
/// %0 = tensor.pad %src ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %r = vector.transfer_read %0[%c0, %c0], %cst
///     {in_bounds = [true, true]} : tensor<17x5xf32>, vector<17x5xf32>
/// ```
/// is rewritten to:
/// ```
/// %r = vector.transfer_read %src[%c0, %c0], %padding
///     {in_bounds = [true, true]}
///     : tensor<?x?xf32>, vector<17x5xf32>
/// ```
/// Note: By restricting this pattern to in-bounds TransferReadOps, we can be
/// sure that the original padding value %cst was never used.
///
/// This rewrite is possible if:
/// - `xferOp` has no out-of-bounds dims or mask.
/// - Low padding is static 0.
/// - Single, scalar padding value.
struct PadOpVectorizationWithTransferReadPattern
    : public VectorizePadOpUserPattern<vector::TransferReadOp> {
  using VectorizePadOpUserPattern<
      vector::TransferReadOp>::VectorizePadOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, tensor::PadOp padOp,
                            vector::TransferReadOp xferOp) const override {
    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad())
      return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue)
      return failure();
    // Padding value of existing `xferOp` is unused.
    if (xferOp.hasOutOfBoundsDim() || xferOp.getMask())
      return failure();

    rewriter.updateRootInPlace(xferOp, [&]() {
      SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
      xferOp->setAttr(xferOp.getInBoundsAttrName(),
                      rewriter.getBoolArrayAttr(inBounds));
      xferOp.getSourceMutable().assign(padOp.getSource());
      xferOp.getPaddingMutable().assign(padValue);
    });

    return success();
  }
};

/// Rewrite use of tensor::PadOp result in TransferWriteOp.
/// This pattern rewrites TransferWriteOps that write to a padded tensor
/// value, where the same amount of padding is immediately removed again after
/// the write. In such cases, the TransferWriteOp can write to the non-padded
/// tensor value and apply out-of-bounds masking. E.g.:
/// ```
/// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
///     : tensor<...> to tensor<?x?xf32>
/// %1 = tensor.pad %0 ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %2 = vector.transfer_write %vec, %1[...]
///     : vector<17x5xf32>, tensor<17x5xf32>
/// %r = tensor.extract_slice %2[0, 0] [%s0, %s1] [1, 1]
///     : tensor<17x5xf32> to tensor<?x?xf32>
/// ```
/// is rewritten to:
/// ```
/// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
///     : tensor<...> to tensor<?x?xf32>
/// %r = vector.transfer_write %vec, %0[...] : vector<17x5xf32>,
/// tensor<?x?xf32>
/// ```
/// Note: It is important that the ExtractSliceOp %r resizes the result of the
/// TransferWriteOp to the same size as the input of the TensorPadOp (or an
/// even smaller size). Otherwise, %r's new (dynamic) dimensions would differ
/// from %r's old dimensions.
///
/// This rewrite is possible if:
/// - Low padding is static 0.
/// - `xferOp` has exactly one use, which is an ExtractSliceOp. This
///   ExtractSliceOp trims the same amount of padding that was added
///   beforehand.
/// - Single, scalar padding value.
struct PadOpVectorizationWithTransferWritePattern
    : public VectorizePadOpUserPattern<vector::TransferWriteOp> {
  using VectorizePadOpUserPattern<
      vector::TransferWriteOp>::VectorizePadOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, tensor::PadOp padOp,
                            vector::TransferWriteOp xferOp) const override {
    // TODO: support 0-d corner case.
    if (xferOp.getTransferRank() == 0)
      return failure();

    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad())
      return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue)
      return failure();
    // TransferWriteOp result must be directly consumed by an ExtractSliceOp.
    if (!xferOp->hasOneUse())
      return failure();
    auto trimPadding = dyn_cast<tensor::ExtractSliceOp>(*xferOp->user_begin());
    if (!trimPadding)
      return failure();
    // Only static zero offsets supported when trimming padding.
    if (!trimPadding.hasZeroOffset())
      return failure();
    // trimPadding must remove the amount of padding that was added earlier.
    if (!hasSameTensorSize(padOp.getSource(), trimPadding))
      return failure();

    // Insert the new TransferWriteOp at position of the old TransferWriteOp.
    rewriter.setInsertionPoint(xferOp);

    SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
    auto newXferOp = rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        xferOp, padOp.getSource().getType(), xferOp.getVector(),
        padOp.getSource(), xferOp.getIndices(), xferOp.getPermutationMapAttr(),
        xferOp.getMask(), rewriter.getBoolArrayAttr(inBounds));
    rewriter.replaceOp(trimPadding, newXferOp->getResult(0));

    return success();
  }

  /// Check if `beforePadding` and `afterTrimming` have the same tensor size,
  /// i.e., same dimensions.
  ///
  /// Dimensions may be static, dynamic, or a mix of both. In case of dynamic
  /// dimensions, this function tries to infer the (static) tensor size by
  /// looking at the defining op and utilizing op-specific knowledge.
  ///
  /// This is a conservative analysis. In case equal tensor sizes cannot be
  /// proven statically, this analysis returns `false` even though the tensor
  /// sizes may turn out to be equal at runtime.
  bool hasSameTensorSize(Value beforePadding,
                         tensor::ExtractSliceOp afterTrimming) const {
    // If the input to tensor::PadOp is a CastOp, try with both CastOp
    // result and CastOp operand.
    if (auto castOp = beforePadding.getDefiningOp<tensor::CastOp>())
      if (hasSameTensorSize(castOp.getSource(), afterTrimming))
        return true;

    auto t1 = beforePadding.getType().dyn_cast<RankedTensorType>();
    auto t2 = afterTrimming.getType().dyn_cast<RankedTensorType>();
    // Only RankedTensorType supported.
    if (!t1 || !t2)
      return false;
    // Rank of both values must be the same.
    if (t1.getRank() != t2.getRank())
      return false;

    // All static dimensions must be the same. Mixed cases (e.g., dimension
    // static in `t1` but dynamic in `t2`) are not supported.
    for (unsigned i = 0; i < t1.getRank(); ++i) {
      if (t1.isDynamicDim(i) != t2.isDynamicDim(i))
        return false;
      if (!t1.isDynamicDim(i) && t1.getDimSize(i) != t2.getDimSize(i))
        return false;
    }

    // Nothing more to check if all dimensions are static.
    if (t1.getNumDynamicDims() == 0)
      return true;

    // All dynamic sizes must be the same. The only supported case at the
    // moment is when `beforePadding` is an ExtractSliceOp (or a cast
    // thereof).

    // Apart from CastOp, only ExtractSliceOp is supported.
    auto beforeSlice = beforePadding.getDefiningOp<tensor::ExtractSliceOp>();
    if (!beforeSlice)
      return false;

    assert(static_cast<size_t>(t1.getRank()) ==
           beforeSlice.getMixedSizes().size());
    assert(static_cast<size_t>(t2.getRank()) ==
           afterTrimming.getMixedSizes().size());

    for (unsigned i = 0; i < t1.getRank(); ++i) {
      // Skip static dimensions.
      if (!t1.isDynamicDim(i))
        continue;
      auto size1 = beforeSlice.getMixedSizes()[i];
      auto size2 = afterTrimming.getMixedSizes()[i];

      // Case 1: Same value or same constant int.
      if (isEqualConstantIntOrValue(size1, size2))
        continue;

      // Other cases: Take a deeper look at defining ops of values.
      auto v1 = size1.dyn_cast<Value>();
      auto v2 = size2.dyn_cast<Value>();
      if (!v1 || !v2)
        return false;

      // Case 2: Both values are identical AffineMinOps. (Should not happen if
      // CSE is run.)
      auto minOp1 = v1.getDefiningOp<AffineMinOp>();
      auto minOp2 = v2.getDefiningOp<AffineMinOp>();
      if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() &&
          minOp1.operands() == minOp2.operands())
        continue;

      // Add additional cases as needed.
    }

    // All tests passed.
    return true;
  }
};

/// Rewrite use of tensor::PadOp result in InsertSliceOp. E.g.:
/// ```
/// %0 = tensor.pad %src ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %r = tensor.insert_slice %0
///     into %dest[%a, %b, 0, 0] [1, 1, 17, 5] [1, 1, 1, 1]
///     : tensor<17x5xf32> into tensor<?x?x17x5xf32>
/// ```
/// is rewritten to:
/// ```
/// %0 = vector.transfer_read %src[%c0, %c0], %padding
///     : tensor<?x?xf32>, vector<17x5xf32>
/// %r = vector.transfer_write %0, %dest[%a, %b, %c0, %c0]
///     {in_bounds = [true, true]} : vector<17x5xf32>, tensor<?x?x17x5xf32>
/// ```
///
/// This rewrite is possible if:
/// - Low padding is static 0.
/// - `padOp` result shape is static.
/// - The entire padded tensor is inserted.
///   (Implies that sizes of `insertOp` are all static.)
/// - Only unit strides in `insertOp`.
/// - Single, scalar padding value.
/// - `padOp` result not used as destination.
struct PadOpVectorizationWithInsertSlicePattern
    : public VectorizePadOpUserPattern<tensor::InsertSliceOp> {
  using VectorizePadOpUserPattern<
      tensor::InsertSliceOp>::VectorizePadOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, tensor::PadOp padOp,
                            tensor::InsertSliceOp insertOp) const override {
    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad())
      return failure();
    // Only unit stride supported.
    if (!insertOp.hasUnitStride())
      return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue)
      return failure();
    // Dynamic shapes not supported.
    if (!padOp.getResult().getType().cast<ShapedType>().hasStaticShape())
      return failure();
    // Pad result not used as destination.
    if (insertOp.getDest() == padOp.getResult())
      return failure();

    auto vecType = VectorType::get(padOp.getType().getShape(),
                                   padOp.getType().getElementType());
    unsigned vecRank = vecType.getRank();
    unsigned tensorRank = insertOp.getType().getRank();

    // Check if sizes match: Insert the entire tensor into most minor dims.
    // (No permutations allowed.)
    SmallVector<int64_t> expectedSizes(tensorRank - vecRank, 1);
    expectedSizes.append(vecType.getShape().begin(), vecType.getShape().end());
    if (!llvm::all_of(
            llvm::zip(insertOp.getMixedSizes(), expectedSizes), [](auto it) {
              return getConstantIntValue(std::get<0>(it)) == std::get<1>(it);
            }))
      return failure();

    // Insert the TransferReadOp and TransferWriteOp at the position of the
    // InsertSliceOp.
    rewriter.setInsertionPoint(insertOp);

    // Generate TransferReadOp: Read entire source tensor and add high
    // padding.
    SmallVector<Value> readIndices(
        vecRank, rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
    auto read = rewriter.create<vector::TransferReadOp>(
        padOp.getLoc(), vecType, padOp.getSource(), readIndices, padValue);

    // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at
    // specified offsets. Write is fully in-bounds because an InsertSliceOp's
    // source must fit into the destination at the specified offsets.
    auto writeIndices =
        ofrToIndexValues(rewriter, padOp.getLoc(), insertOp.getMixedOffsets());
    SmallVector<bool> inBounds(vecRank, true);
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        insertOp, read, insertOp.getDest(), writeIndices,
        ArrayRef<bool>{inBounds});

    return success();
  }
};

void mlir::linalg::populatePadOpVectorizationPatterns(
    RewritePatternSet &patterns, PatternBenefit baseBenefit) {
  patterns.add<GenericPadOpVectorizationPattern>(patterns.getContext(),
                                                 baseBenefit);
  // Try these specialized patterns first before resorting to the generic one.
  patterns.add<PadOpVectorizationWithTransferReadPattern,
               PadOpVectorizationWithTransferWritePattern,
               PadOpVectorizationWithInsertSlicePattern>(
      patterns.getContext(), baseBenefit.getBenefit() + 1);
}

//----------------------------------------------------------------------------//
// Forwarding patterns
//----------------------------------------------------------------------------//

/// Check whether there is any interleaved use of any `values` between
/// `firstOp` and `secondOp`. Conservatively return `true` if any op or value
/// is in a different block.
static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp,
                                    ValueRange values) {
  if (firstOp->getBlock() != secondOp->getBlock() ||
      !firstOp->isBeforeInBlock(secondOp)) {
    LDBG("interleavedUses precondition failed, firstOp: "
         << *firstOp << ", second op: " << *secondOp);
    return true;
  }
  for (auto v : values) {
    for (auto &u : v.getUses()) {
      Operation *owner = u.getOwner();
      if (owner == firstOp || owner == secondOp)
        continue;
      // TODO: this is too conservative, use dominance info in the future.
      if (owner->getBlock() == firstOp->getBlock() &&
          (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner)))
        continue;
      LDBG(" found interleaved op " << *owner << ", firstOp: " << *firstOp
                                    << ", second op: " << *secondOp);
      return true;
    }
  }
  return false;
}

/// Return the unique subview use of `v` if it is indeed unique, null
/// otherwise.
static memref::SubViewOp getSubViewUseIfUnique(Value v) {
  memref::SubViewOp subViewOp;
  for (auto &u : v.getUses()) {
    if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) {
      if (subViewOp)
        return memref::SubViewOp();
      subViewOp = newSubViewOp;
    }
  }
  return subViewOp;
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
    vector::TransferReadOp xferOp, PatternRewriter &rewriter) const {

  // TODO: support mask.
  if (xferOp.getMask())
    return failure();

  // Transfer into `view`.
  Value viewOrAlloc = xferOp.getSource();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  LDBG(viewOrAlloc);

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();
  LDBG("with subView " << subView);

  // Find the copy into `subView` without interleaved uses.
  memref::CopyOp copyOp;
  for (auto &u : subView.getUses()) {
    if (auto newCopyOp = dyn_cast<memref::CopyOp>(u.getOwner())) {
      assert(newCopyOp.target().getType().isa<MemRefType>());
      if (newCopyOp.target() != subView)
        continue;
      LDBG("copy candidate " << *newCopyOp);
      if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();
  LDBG("with copy " << *copyOp);

  // Find the fill into `viewOrAlloc` without interleaved uses before the
  // copy.
  FillOp maybeFillOp;
  for (auto &u : viewOrAlloc.getUses()) {
    if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) {
      assert(newFillOp.output().getType().isa<MemRefType>());
      if (newFillOp.output() != viewOrAlloc)
        continue;
      LDBG("fill candidate " << *newFillOp);
      if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView}))
        continue;
      maybeFillOp = newFillOp;
      break;
    }
  }
  // Ensure padding matches.
  if (maybeFillOp && xferOp.getPadding() != maybeFillOp.value())
    return failure();
  if (maybeFillOp)
    LDBG("with maybeFillOp " << *maybeFillOp);

  // `in` is the subview that memref.copy reads. Replace it.
  Value in = copyOp.source();

  // memref.copy + linalg.fill can be used to create a padded local buffer.
  // The `masked` attribute is only valid on this padded buffer.
  // When forwarding to vector.transfer_read, the attribute must be reset
  // conservatively.
  Value res = rewriter.create<vector::TransferReadOp>(
      xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.getIndices(),
      xferOp.getPermutationMapAttr(), xferOp.getPadding(), xferOp.getMask(),
      // in_bounds is explicitly reset
      /*inBoundsAttr=*/ArrayAttr());

  if (maybeFillOp)
    rewriter.eraseOp(maybeFillOp);
  rewriter.eraseOp(copyOp);
  rewriter.replaceOp(xferOp, res);

  return success();
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
    vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const {
  // TODO: support mask.
  if (xferOp.getMask())
    return failure();

  // Transfer into `viewOrAlloc`.
  Value viewOrAlloc = xferOp.getSource();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();

  // Find the copy from `subView` without interleaved uses.
  memref::CopyOp copyOp;
  for (auto &u : subViewOp.getResult().getUses()) {
    if (auto newCopyOp = dyn_cast<memref::CopyOp>(u.getOwner())) {
      if (newCopyOp.source() != subView)
        continue;
      if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();

1262   // `out` is the subview copied into that we replace.
1263   assert(copyOp.target().getType().isa<MemRefType>());
1264   Value out = copyOp.target();
1265 
1266   // Forward vector.transfer into copy.
1267   // memref.copy + linalg.fill can be used to create a padded local buffer.
1268   // The `masked` attribute is only valid on this padded buffer.
1269   // When forwarding to vector.transfer_write, the attribute must be reset
1270   // conservatively.
1271   rewriter.create<vector::TransferWriteOp>(
1272       xferOp.getLoc(), xferOp.getVector(), out, xferOp.getIndices(),
1273       xferOp.getPermutationMapAttr(), xferOp.getMask(),
1274       // in_bounds is explicitly reset
1275       /*inBoundsAttr=*/ArrayAttr());
1276 
1277   rewriter.eraseOp(copyOp);
1278   rewriter.eraseOp(xferOp);
1279 
1280   return success();
1281 }
1282 
1283 //===----------------------------------------------------------------------===//
1284 // Convolution vectorization patterns
1285 //===----------------------------------------------------------------------===//
1286 
/// Base case of the recursion: no dimensions left to bind.
template <int N>
static void bindShapeDims(ShapedType shapedType) {}

template <int N, typename IntTy, typename... IntTy2>
static void bindShapeDims(ShapedType shapedType, IntTy &val, IntTy2 &...vals) {
  val = shapedType.getShape()[N];
  bindShapeDims<N + 1, IntTy2 &...>(shapedType, vals...);
}

/// Bind a pack of int& to the leading dimensions of shapedType.getShape().
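/// For example (a hypothetical use), given a ShapedType with shape {2, 3, 4}:
/// ```
///   int64_t kw, c;
///   bindShapeDims(shapedType, kw, c); // kw = 2, c = 3
/// ```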
template <typename... IntTy>
static void bindShapeDims(ShapedType shapedType, IntTy &...vals) {
  bindShapeDims<0>(shapedType, vals...);
}

namespace {
/// Generate a vector implementation for either:
/// ```
///   Op def: (     n,     w,     c,    kw,    f  )
///    Iters: ({Par(), Par(), Par(), Red(), Red()})
///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
/// ```
/// kw is unrolled, w is unrolled iff strideW > 1.
///
/// or
///
/// ```
///   Op def: (     n,     w,     c,    kw )
///    Iters: ({Par(), Par(), Par(), Red()})
///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
/// ```
/// kw is unrolled, w is unrolled iff strideW > 1.
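///
/// For instance, the first form corresponds to `linalg.conv_1d_nwc_wcf`; a
/// minimal sketch with hypothetical static shapes (n=4, w=5, c=3, kw=2, f=8):
/// ```
///   linalg.conv_1d_nwc_wcf
///     {dilations = dense<1> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>}
///     ins(%input, %filter : memref<4x6x3xf32>, memref<2x3x8xf32>)
///     outs(%output : memref<4x5x8xf32>)
/// ```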
struct Conv1DNwcGenerator : public StructuredGenerator<LinalgOp> {
  Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
                     int dilationW)
      : StructuredGenerator<LinalgOp>(builder, linalgOp), strideW(strideW),
        dilationW(dilationW) {
    // Determine whether `linalgOp` can be vectorized by this generator.
    if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
      return;
    lhsShaped = linalgOp.inputs()[0];
    rhsShaped = linalgOp.inputs()[1];
    resShaped = linalgOp.outputs()[0];
    lhsShapedType = lhsShaped.getType().dyn_cast<ShapedType>();
    rhsShapedType = rhsShaped.getType().dyn_cast<ShapedType>();
    resShapedType = resShaped.getType().dyn_cast<ShapedType>();
    if (!lhsShapedType || !rhsShapedType || !resShapedType)
      return;
    if (lhsShapedType.getRank() != 3 ||
        (rhsShapedType.getRank() != 2 && rhsShapedType.getRank() != 3) ||
        resShapedType.getRank() != 3)
      return;

    // Check for a reduction `add` preceded by a `mul`.
    Operation *reduceOp = matchLinalgReduction(linalgOp.getOutputOperand(0));
    if (!reduceOp)
      return;
    llvm::Optional<vector::CombiningKind> maybeKind =
        getCombinerOpKind(reduceOp);
    if (!maybeKind || *maybeKind != vector::CombiningKind::ADD)
      return;
    // Check for a single `mul` predecessor. The `mul` operands must be block
    // arguments or extensions of block arguments.
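    // For illustration, the region body being matched looks like (a sketch
    // with hypothetical f32 element types):
    //   ^bb0(%in: f32, %filter: f32, %out: f32):
    //     %mul = arith.mulf %in, %filter : f32
    //     %add = arith.addf %mul, %out : f32
    //     linalg.yield %add : f32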
    Operation *mulOp = nullptr;
    for (Value operand : reduceOp->getOperands()) {
      if (operand.isa<BlockArgument>())
        continue;
      if (mulOp)
        return;
      mulOp = operand.getDefiningOp();
      if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
        return;
    }
    if (!mulOp)
      return;
    for (Value operand : mulOp->getOperands()) {
      if (Operation *def = operand.getDefiningOp()) {
        if (!isa<arith::ExtFOp>(def))
          return;
        operand = def->getOperand(0);
      }
      if (!operand.isa<BlockArgument>())
        return;
    }
    // The op is now known to be valid.
    valid = true;
  }

  /// Generate a vector implementation for:
  /// ```
  ///   Op def: (     n,     w,     c,    kw,    f  )
  ///    Iters: ({Par(), Par(), Par(), Red(), Red()})
  ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
  /// ```
  /// kw is always unrolled.
  /// TODO: only unroll w (resp. kw) when strideW (resp. dilationW) is > 1.
  FailureOr<Operation *> conv() {
    if (!valid)
      return failure();

    int64_t nSize, wSize, cSize, kwSize, fSize;
    // kernel{kw, c, f}
    bindShapeDims(rhsShapedType, kwSize, cSize, fSize);
    // out{n, w, f}
    bindShapeDims(resShapedType, nSize, wSize);

    vector::TransferWriteOp write;
    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);

    // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
    // When strideW == 1, we can batch the contiguous loads and avoid
    // unrolling.
    int64_t wSizeStep = strideW == 1 ? wSize : 1;

    Type lhsEltType = lhsShapedType.getElementType();
    Type rhsEltType = rhsShapedType.getElementType();
    Type resEltType = resShapedType.getElementType();
    VectorType lhsType = VectorType::get(
        {nSize,
         // iw = (ow - 1) * sw + (kw - 1) * dw + 1
         //   (e.g. 16 convolved with 3 (@ stride 1, dilation 1) -> 14).
         // Perform the proper inclusive -> exclusive -> inclusive conversion.
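         // For example (an illustrative sketch): ow = 4, kw = 3, stride 2,
         // dilation 2 gives iw = (4 - 1) * 2 + (3 - 1) * 2 + 1 = 11.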
         ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
         cSize},
        lhsEltType);
    VectorType rhsType = VectorType::get({kwSize, cSize, fSize}, rhsEltType);
    VectorType resType = VectorType::get({nSize, wSize, fSize}, resEltType);

    // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0,
    // 0].
    Value lhs = builder.create<vector::TransferReadOp>(
        loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
    // Read rhs slice of size {kw, c, f} @ [0, 0, 0].
    Value rhs = builder.create<vector::TransferReadOp>(
        loc, rhsType, rhsShaped, ValueRange{zero, zero, zero});
    // Read res slice of size {n, w, f} @ [0, 0, 0].
    Value res = builder.create<vector::TransferReadOp>(
        loc, resType, resShaped, ValueRange{zero, zero, zero});

    //===------------------------------------------------------------------===//
    // Begin vector-only rewrite part
    //===------------------------------------------------------------------===//
    // Unroll along kw and read slices of lhs and rhs.
    SmallVector<Value> lhsVals, rhsVals, resVals;
    // Extract lhs slice of size {n, wSizeStep, c} @ [0, sw * w + dw * kw, 0].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
            loc, lhs,
            /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
            /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
            /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
      }
    }
    // Extract rhs slice of size {c, f} @ [kw].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      rhsVals.push_back(builder.create<vector::ExtractOp>(
          loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
    }
    // Extract res slice: {n, wSizeStep, f} @ [0, w, 0].
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
          loc, res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, fSize},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
    }

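    // lhsVals is stored in row-major [kw][w] order; linearIndex recovers the
    // flat index of the (kw, w) slice. For example, with wSize = 4 and
    // wSizeStep = 1, (kw, w) maps to kw * 4 + w; with wSizeStep == wSize it
    // degenerates to kw.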
    auto linearIndex = [&](int64_t kw, int64_t w) {
      return kw * (wSize / wSizeStep) + w;
    };

    // Compute contraction: O{n, w, f} += I{n, sw * w + dw * kw, c} * F{c, f}
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        resVals[w] = conv1dSliceAsContraction(
            builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
      }
    }

    // Write back res slice: {n, wSizeStep, f} @ [0, w, 0].
    // This does not depend on kw.
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      res = builder.create<vector::InsertStridedSliceOp>(
          loc, resVals[w], res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1});
    }
    //===------------------------------------------------------------------===//
    // End vector-only rewrite part
    //===------------------------------------------------------------------===//

    // Write back res slice of size {n, w, f} @ [0, 0, 0].
    return builder
        .create<vector::TransferWriteOp>(loc, res, resShaped,
                                         ValueRange{zero, zero, zero})
        .getOperation();
  }

  // Create a contraction: lhs{n, w, c} * rhs{c, f} -> res{n, w, f}
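  // For example, an n = 4, w = 2, c = 3, f = 8 slice yields roughly (a
  // sketch; the bound dims print as d0..d3 in real IR):
  //   %0 = vector.contract {
  //          indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>,
  //                           affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
  //                           affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>],
  //          iterator_types = ["parallel", "parallel", "parallel",
  //                            "reduction"]}
  //        %lhs, %rhs, %res
  //      : vector<4x2x3xf32>, vector<3x8xf32> into vector<4x2x8xf32>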
  Value conv1dSliceAsContraction(OpBuilder &b, Location loc, Value lhs,
                                 Value rhs, Value res) {
    StringRef par = Par().strRef, red = Red().strRef;
    AffineExpr n, w, f, c;
    bindDims(ctx, n, w, f, c);
    return b.create<vector::ContractionOp>(
        loc, lhs, rhs, res,
        /*indexingMaps=*/MapList{{n, w, c}, {c, f}, {n, w, f}},
        /*iteratorTypes=*/ArrayRef<StringRef>{par, par, par, red});
  }

  /// Generate a vector implementation for:
  /// ```
  ///   Op def: (     n,     w,     c,    kw)
  ///    Iters: ({Par(), Par(), Par(), Red()})
  ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
  /// ```
  /// kw is always unrolled.
  /// TODO: only unroll w (resp. kw) when strideW (resp. dilationW) is > 1.
  FailureOr<Operation *> depthwiseConv() {
    if (!valid)
      return failure();

    int64_t nSize, wSize, cSize, kwSize;
    // kernel{kw, c}
    bindShapeDims(rhsShapedType, kwSize, cSize);
    // out{n, w, c}
    bindShapeDims(resShapedType, nSize, wSize);

    vector::TransferWriteOp write;
    Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);

    // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
    // When strideW == 1, we can batch the contiguous loads and avoid
    // unrolling.
    int64_t wSizeStep = strideW == 1 ? wSize : 1;

    Type lhsEltType = lhsShapedType.getElementType();
    Type rhsEltType = rhsShapedType.getElementType();
    Type resEltType = resShapedType.getElementType();
    VectorType lhsType = VectorType::get(
        {nSize,
         // iw = (ow - 1) * sw + (kw - 1) * dw + 1
         //   (e.g. 16 convolved with 3 (@ stride 1, dilation 1) -> 14).
         ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
         cSize},
        lhsEltType);
    VectorType rhsType = VectorType::get({kwSize, cSize}, rhsEltType);
    VectorType resType = VectorType::get({nSize, wSize, cSize}, resEltType);

    // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0,
    // 0].
    Value lhs = builder.create<vector::TransferReadOp>(
        loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
    // Read rhs slice of size {kw, c} @ [0, 0].
    Value rhs = builder.create<vector::TransferReadOp>(loc, rhsType, rhsShaped,
                                                       ValueRange{zero, zero});
    // Read res slice of size {n, w, c} @ [0, 0, 0].
    Value res = builder.create<vector::TransferReadOp>(
        loc, resType, resShaped, ValueRange{zero, zero, zero});

    //===------------------------------------------------------------------===//
    // Begin vector-only rewrite part
    //===------------------------------------------------------------------===//
    // Unroll along kw and read slices of lhs and rhs.
    SmallVector<Value> lhsVals, rhsVals, resVals;
    // Extract lhs slice of size {n, wSizeStep, c}
    //   @ [0, sw * w + dw * kw, 0].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
            loc, lhs,
            /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
            /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
            /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
      }
    }
    // Extract rhs slice of size {c} @ [kw].
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      rhsVals.push_back(builder.create<vector::ExtractOp>(
          loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
    }
    // Extract res slice: {n, wSizeStep, c} @ [0, w, 0].
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
          loc, res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
    }

    auto linearIndex = [&](int64_t kw, int64_t w) {
      return kw * (wSize / wSizeStep) + w;
    };

    // Compute contraction: O{n, w, c} += I{n, sw * w + dw * kw, c} * F{c}
    for (int64_t kw = 0; kw < kwSize; ++kw) {
      for (int64_t w = 0; w < wSize; w += wSizeStep) {
        resVals[w] = depthwiseConv1dSliceAsFma(
            builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
      }
    }

    // Write back res slice: {n, wSizeStep, c} @ [0, w, 0].
    // This does not depend on kw.
    for (int64_t w = 0; w < wSize; w += wSizeStep) {
      res = builder.create<vector::InsertStridedSliceOp>(
          loc, resVals[w], res,
          /*offsets=*/ArrayRef<int64_t>{0, w, 0},
          /*strides=*/ArrayRef<int64_t>{1, 1, 1});
    }
    //===------------------------------------------------------------------===//
    // End vector-only rewrite part
    //===------------------------------------------------------------------===//

    // Write back res slice of size {n, w, c} @ [0, 0, 0].
    return builder
        .create<vector::TransferWriteOp>(loc, res, resShaped,
                                         ValueRange{zero, zero, zero})
        .getOperation();
  }

  /// Lower lhs{n, w, c} * rhs{c} -> res{n, w, c} to an fma.
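  /// For example, an n = 4, w = 2, c = 3 slice yields roughly (a sketch):
  /// ```
  ///   %b = vector.broadcast %rhs : vector<3xf32> to vector<4x2x3xf32>
  ///   %r = vector.fma %lhs, %b, %res : vector<4x2x3xf32>
  /// ```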
  Value depthwiseConv1dSliceAsFma(OpBuilder &b, Location loc, Value lhs,
                                  Value rhs, Value res) {
    Value bcast = b.create<vector::BroadcastOp>(loc, res.getType(), rhs);
    return b.create<vector::FMAOp>(loc, lhs, bcast, res);
  }

  /// Entry point for the non-depthwise case; matches the common form:
  ///   {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
  /// TODO: transpose other layouts into this common form.
  FailureOr<Operation *> generateConv() {
    AffineExpr n, w, f, kw, c;
    bindDims(ctx, n, w, f, kw, c);
    if (!iters({Par(), Par(), Par(), Red(), Red()}))
      return failure();

    // No transposition needed.
    if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                /*rhsIndex*/ {kw, c, f},
                /*resIndex*/ {n, w, f}}))
      return conv();
    return failure();
  }

  /// Entry point for the depthwise case; matches the common form:
  ///   {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
  /// TODO: transpose other layouts into this common form.
  FailureOr<Operation *> generateDilatedConv() {
    AffineExpr n, w, c, kw;
    bindDims(ctx, n, w, c, kw);
    if (!iters({Par(), Par(), Par(), Red()}))
      return failure();

    // No transposition needed.
    if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
                /*rhsIndex*/ {kw, c},
                /*resIndex*/ {n, w, c}}))
      return depthwiseConv();
    return failure();
  }

private:
  bool valid = false;
  int strideW, dilationW;
  Value lhsShaped, rhsShaped, resShaped;
  ShapedType lhsShapedType, rhsShapedType, resShapedType;
};
} // namespace

/// Helper function to vectorize a LinalgOp with convolution semantics.
// TODO: extend the generic vectorization to support windows and drop this.
static FailureOr<Operation *> vectorizeConvolution(OpBuilder &b, LinalgOp op) {
  // The ConvolutionOpInterface gives us guarantees of existence for
  // strides/dilations. However, we do not need to rely on those; we simply
  // use them if present, otherwise use the defaults, and let the generic
  // conv matcher in Conv1DNwcGenerator succeed or fail.
  auto strides = op->getAttrOfType<DenseIntElementsAttr>("strides");
  auto dilations = op->getAttrOfType<DenseIntElementsAttr>("dilations");
  auto stride = strides ? *strides.getValues<uint64_t>().begin() : 1;
  auto dilation = dilations ? *dilations.getValues<uint64_t>().begin() : 1;
  Conv1DNwcGenerator e(b, op, stride, dilation);
  auto res = e.generateConv();
  if (succeeded(res))
    return res;
  return e.generateDilatedConv();
}

struct VectorizeConvolution : public OpInterfaceRewritePattern<LinalgOp> {
  using OpInterfaceRewritePattern::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(LinalgOp op,
                                PatternRewriter &rewriter) const override {
    FailureOr<Operation *> resultOrFail = vectorizeConvolution(rewriter, op);
    if (failed(resultOrFail))
      return failure();
    Operation *newOp = *resultOrFail;
    if (newOp->getNumResults() == 0) {
      // Memref semantics: the op produced no results; simply erase it.
      rewriter.eraseOp(op.getOperation());
      return success();
    }
    assert(newOp->getNumResults() == 1 && "expected single result");
    rewriter.replaceOp(op.getOperation(), newOp->getResult(0));
    return success();
  }
};

void mlir::linalg::populateConvolutionVectorizationPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns.add<VectorizeConvolution>(patterns.getContext(), benefit);
}
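
// A minimal usage sketch (assuming a func::FuncOp anchor and the greedy
// driver from mlir/Transforms/GreedyPatternRewriteDriver.h):
//   RewritePatternSet patterns(funcOp.getContext());
//   linalg::populateConvolutionVectorizationPatterns(patterns);
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));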