1 //===- Vectorization.cpp - Implementation of linalg Vectorization ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the linalg dialect Vectorization transformations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Analysis/LoopAnalysis.h"
14 #include "mlir/Analysis/SliceAnalysis.h"
15 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
16 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
17 #include "mlir/Dialect/Linalg/IR/Linalg.h"
18 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
19 #include "mlir/Dialect/Linalg/Utils/Utils.h"
20 #include "mlir/Dialect/Tensor/IR/Tensor.h"
21 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
22 #include "mlir/Dialect/Vector/VectorOps.h"
23 #include "mlir/Dialect/Vector/VectorTransforms.h"
24 #include "mlir/IR/AffineExpr.h"
25 #include "mlir/IR/Matchers.h"
26 #include "mlir/IR/PatternMatch.h"
27 #include "mlir/Pass/Pass.h"
28 #include "mlir/Support/LLVM.h"
29 #include "mlir/Transforms/RegionUtils.h"
30 #include "llvm/ADT/ScopeExit.h"
31 #include "llvm/ADT/Sequence.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/TypeSwitch.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include <type_traits>
37 
38 using namespace mlir;
39 using namespace mlir::linalg;
40 
41 #define DEBUG_TYPE "linalg-vectorization"
42 
43 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
44 #define LDBG(X) LLVM_DEBUG(DBGS() << X)
45 
46 static FailureOr<Operation *>
47 vectorizeConvolution(OpBuilder &b, ConvolutionOpInterface convOp);
48 
49 /// Return the unique instance of OpType in `block` if it is indeed unique.
50 /// Return null if none or more than 1 instances exist.
51 template <typename OpType>
52 static OpType getSingleOpOfType(Block &block) {
53   OpType res;
54   block.walk([&](OpType op) {
55     if (res) {
56       res = nullptr;
57       return WalkResult::interrupt();
58     }
59     res = op;
60     return WalkResult::advance();
61   });
62   return res;
63 }
64 
65 /// Given an indexing `map` coming from a LinalgOp indexing, restricted to a
66 /// projectedPermutation, compress the unused dimensions to serve as a
67 /// permutation_map for a vector transfer operation.
68 /// For example, given a linalg op such as:
69 ///
70 /// ```
71 ///   %0 = linalg.generic {
72 ///        indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>,
73 ///        indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)>
74 ///      }
75 ///     ins(%0 : tensor<2x3x4xf32>)
76 ///    outs(%1 : tensor<5x6xf32>)
77 /// ```
78 ///
79 /// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine
80 /// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second
81 /// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`.
82 static AffineMap reindexIndexingMap(AffineMap map) {
83   assert(map.isProjectedPermutation(/*allowZeroInResults=*/true) &&
84          "expected projected permutation");
85   auto res = compressUnusedDims(map);
86   assert(res.getNumDims() == res.getNumResults() &&
87          "expected reindexed map with same number of dims and results");
88   return res;
89 }
90 
91 /// Helper data structure to represent the result of vectorization.
92 /// In certain specific cases, like terminators, we do not want to propagate/
93 enum VectorizationStatus {
94   /// Op failed to vectorize.
95   Failure = 0,
96   /// Op vectorized and custom function took care of replacement logic
97   NoReplace,
98   /// Op vectorized into a new Op whose results will replace original Op's
99   /// results.
100   NewOp
101   // TODO: support values if Op vectorized to Many-Ops whose results we need to
102   // aggregate for replacement.
103 };
104 struct VectorizationResult {
105   /// Return status from vectorizing the current op.
106   enum VectorizationStatus status = VectorizationStatus::Failure;
107   /// New vectorized operation to replace the current op.
108   /// Replacement behavior is specified by `status`.
109   Operation *newOp;
110 };
111 
112 static llvm::Optional<vector::CombiningKind>
113 getKindForOp(Operation *reductionOp) {
114   if (!reductionOp)
115     return llvm::None;
116   return llvm::TypeSwitch<Operation *, llvm::Optional<vector::CombiningKind>>(
117              reductionOp)
118       .Case<arith::AddIOp, arith::AddFOp>(
119           [&](auto op) { return vector::CombiningKind::ADD; })
120       .Case<arith::AndIOp>([&](auto op) { return vector::CombiningKind::AND; })
121       .Case<arith::MaxSIOp>(
122           [&](auto op) { return vector::CombiningKind::MAXSI; })
123       .Case<arith::MaxFOp>([&](auto op) { return vector::CombiningKind::MAXF; })
124       .Case<arith::MinSIOp>(
125           [&](auto op) { return vector::CombiningKind::MINSI; })
126       .Case<arith::MinFOp>([&](auto op) { return vector::CombiningKind::MINF; })
127       .Case<arith::MulIOp, arith::MulFOp>(
128           [&](auto op) { return vector::CombiningKind::MUL; })
129       .Case<arith::OrIOp>([&](auto op) { return vector::CombiningKind::OR; })
130       .Case<arith::XOrIOp>([&](auto op) { return vector::CombiningKind::XOR; })
131       .Default([&](auto op) { return llvm::None; });
132 }
133 
134 /// Check whether `outputOperand` is a reduction with a single combiner
135 /// operation. Return the combiner operation of the reduction. Return
136 /// nullptr otherwise. Multiple reduction operations would impose an
137 /// ordering between reduction dimensions and is currently unsupported in
138 /// Linalg. This limitation is motivated by the fact that e.g. min(max(X)) !=
139 /// max(min(X))
140 // TODO: use in LinalgOp verification, there is a circular dependency atm.
141 static Operation *matchLinalgReduction(OpOperand *outputOperand) {
142   auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
143   unsigned outputPos =
144       outputOperand->getOperandNumber() - linalgOp.getNumInputs();
145   // Only single combiner operations are supported for now.
146   SmallVector<Operation *, 4> combinerOps;
147   if (!matchReduction(linalgOp.getRegionOutputArgs(), outputPos, combinerOps) ||
148       combinerOps.size() != 1)
149     return nullptr;
150 
151   // Return the combiner operation.
152   return combinerOps[0];
153 }
154 
155 /// Broadcast `value` to a vector of `shape` if possible. Return value
156 /// otherwise.
157 static Value broadcastIfNeeded(OpBuilder &b, Value value,
158                                ArrayRef<int64_t> shape) {
159   // If no shape to broadcast to, just return `value`.
160   if (shape.empty())
161     return value;
162   VectorType targetVectorType =
163       VectorType::get(shape, getElementTypeOrSelf(value));
164   if (vector::isBroadcastableTo(value.getType(), targetVectorType) !=
165       vector::BroadcastableToResult::Success)
166     return value;
167   Location loc = b.getInsertionPoint()->getLoc();
168   return b.createOrFold<vector::BroadcastOp>(loc, targetVectorType, value);
169 }
170 
171 /// Create MultiDimReductionOp to compute the reduction for `reductionOp`. This
172 /// assumes that `reductionOp` has two operands and one of them is the reduction
173 /// initial value.
174 static Value buildMultiDimReduce(OpBuilder &b, Operation *reduceOp,
175                                  Value valueToReduce,
176                                  const SmallVector<bool> &reductionMask) {
177   auto maybeKind = getKindForOp(reduceOp);
178   assert(maybeKind && "Failed precondition: could not get reduction kind");
179   return b.create<vector::MultiDimReductionOp>(
180       reduceOp->getLoc(), valueToReduce, reductionMask, *maybeKind);
181 }
182 
183 static SmallVector<bool> getReductionMask(LinalgOp linalgOp) {
184   unsigned idx = 0;
185   SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false);
186   for (auto attr : linalgOp.iterator_types()) {
187     if (isReductionIterator(attr))
188       reductionMask[idx] = true;
189     ++idx;
190   }
191   return reductionMask;
192 }
193 
194 /// Build a vector.transfer_write of `value` into `outputOperand` at indices set
195 /// to all `0`; where `outputOperand` is an output operand of the LinalgOp
196 /// currently being vectorized. If `dest` has null rank, build an memref.store.
197 /// Return the produced value or null if no value is produced.
198 static Value buildVectorWrite(OpBuilder &b, Value value,
199                               OpOperand *outputOperand) {
200   Operation *write;
201   Location loc = value.getLoc();
202   auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
203   ArrayRef<int64_t> shape = linalgOp.getShape(outputOperand);
204   auto vectorType = VectorType::get(
205       shape, getElementTypeOrSelf(outputOperand->get().getType()));
206   if (vectorType.getRank() > 0) {
207     // 0-d case is still special: do not invert the reindexing map.
208     AffineMap map =
209         reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand));
210     SmallVector<int64_t> transposeShape =
211         applyPermutationMap(inversePermutation(map), vectorType.getShape());
212     assert(!transposeShape.empty() && "unexpected empty transpose shape");
213     vectorType = VectorType::get(transposeShape, vectorType.getElementType());
214     SmallVector<Value> indices(linalgOp.getRank(outputOperand),
215                                b.create<arith::ConstantIndexOp>(loc, 0));
216     value = broadcastIfNeeded(b, value, vectorType.getShape());
217     write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
218                                               indices, map);
219   } else {
220     if (!value.getType().isa<VectorType>())
221       value = b.create<vector::BroadcastOp>(loc, vectorType, value);
222     assert(value.getType() == vectorType && "incorrect type");
223     write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
224                                               ValueRange{});
225   }
226   LDBG("vectorized op: " << *write);
227   if (!write->getResults().empty())
228     return write->getResult(0);
229   return Value();
230 }
231 
232 // Custom vectorization function type. Produce a vector form of Operation*
233 // assuming all its vectorized operands are already in the BlockAndValueMapping.
234 // Return nullptr if the Operation cannot be vectorized.
235 using CustomVectorizationHook = std::function<VectorizationResult(
236     Operation *, const BlockAndValueMapping &)>;
237 
238 /// Helper function to vectorize the terminator of a `linalgOp`. New result
239 /// vector values are appended to `newResults`. Return
240 /// VectorizationStatus::NoReplace to signal the vectorization algorithm that it
241 /// should not try to map produced operations and instead return the results
242 /// using the `newResults` vector making them available to the
243 /// vectorization algorithm for RAUW. This function is meant to be used as a
244 /// CustomVectorizationHook.
245 static VectorizationResult
246 vectorizeLinalgYield(OpBuilder &b, Operation *op,
247                      const BlockAndValueMapping &bvm, LinalgOp linalgOp,
248                      SmallVectorImpl<Value> &newResults) {
249   auto yieldOp = dyn_cast<linalg::YieldOp>(op);
250   if (!yieldOp)
251     return VectorizationResult{VectorizationStatus::Failure, nullptr};
252   for (const auto &outputs : llvm::enumerate(yieldOp.values())) {
253     // TODO: Scan for an opportunity for reuse.
254     // TODO: use a map.
255     Value vectorValue = bvm.lookup(outputs.value());
256     Value newResult = buildVectorWrite(
257         b, vectorValue, linalgOp.getOutputOperand(outputs.index()));
258     if (newResult)
259       newResults.push_back(newResult);
260   }
261   return VectorizationResult{VectorizationStatus::NoReplace, nullptr};
262 }
263 
264 /// Helper function to vectorize the index operations of a `linalgOp`. Return
265 /// VectorizationStatus::NewOp to signal the vectorization algorithm that it
266 /// should map the produced operations. This function is meant to be used as a
267 /// CustomVectorizationHook.
268 static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
269                                                 LinalgOp linalgOp) {
270   IndexOp indexOp = dyn_cast<linalg::IndexOp>(op);
271   if (!indexOp)
272     return VectorizationResult{VectorizationStatus::Failure, nullptr};
273   auto loc = indexOp.getLoc();
274   // Compute the static loop sizes of the index op.
275   auto targetShape = linalgOp.computeStaticLoopSizes();
276   // Compute a one-dimensional index vector for the index op dimension.
277   SmallVector<int64_t> constantSeq =
278       llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.dim()]));
279   auto constantOp =
280       b.create<arith::ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
281   // Return the one-dimensional index vector if it lives in the trailing
282   // dimension of the iteration space since the vectorization algorithm in this
283   // case can handle the broadcast.
284   if (indexOp.dim() == targetShape.size() - 1)
285     return VectorizationResult{VectorizationStatus::NewOp, constantOp};
286   // Otherwise permute the targetShape to move the index dimension last,
287   // broadcast the one-dimensional index vector to the permuted shape, and
288   // finally transpose the broadcasted index vector to undo the permutation.
289   std::swap(targetShape[indexOp.dim()], targetShape.back());
290   auto broadCastOp = b.create<vector::BroadcastOp>(
291       loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
292   SmallVector<int64_t> transposition =
293       llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
294   std::swap(transposition.back(), transposition[indexOp.dim()]);
295   auto transposeOp =
296       b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
297   return VectorizationResult{VectorizationStatus::NewOp, transposeOp};
298 }
299 
300 /// Create a new vectorized verstion of `op` with the given operands and types.
301 static Operation *createVectorizedOp(OpBuilder &b, Operation *op,
302                                      ValueRange newOperands,
303                                      ArrayRef<Type> types) {
304   OperationState state(op->getLoc(), op->getName());
305   state.addAttributes(op->getAttrs());
306   state.addOperands(newOperands);
307   state.addTypes(types);
308   return b.createOperation(state);
309 }
310 
311 /// Emit reduction operations if the shapes of the value to reduce is different
312 /// that the result shape.
313 static Operation *reduceIfNeeded(OpBuilder &b, LinalgOp linalgOp, Operation *op,
314                                  Value reduceValue, Value initialValue,
315                                  const BlockAndValueMapping &bvm) {
316   Value reduceVec = bvm.lookup(reduceValue);
317   Value outputVec = bvm.lookup(initialValue);
318   auto reduceType = reduceVec.getType().dyn_cast<VectorType>();
319   auto outputType = outputVec.getType().dyn_cast<VectorType>();
320   // Reduce only if needed as the value may already have been reduce for
321   // contraction vectorization.
322   if (!reduceType ||
323       (outputType && reduceType.getShape() == outputType.getShape()))
324     return nullptr;
325   SmallVector<bool> reductionMask = getReductionMask(linalgOp);
326   Value reduce = buildMultiDimReduce(b, op, reduceVec, reductionMask);
327   return createVectorizedOp(b, op, {reduce, outputVec}, reduce.getType());
328 }
329 
330 /// Generic vectorization for a single operation `op`, given already vectorized
331 /// operands carried by `bvm`. Vectorization occurs as follows:
332 ///   1. Try to apply any of the `customVectorizationHooks` and return its
333 ///   result on success.
334 ///   2. Clone any constant in the current scope without vectorization: each
335 ///   consumer of the constant will later determine the shape to which the
336 ///   constant needs to be broadcast to.
337 ///   3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose
338 ///   of the `customVectorizationHooks` to cover such cases.
339 ///   4. Clone `op` in vector form to a vector of shape prescribed by the first
340 ///   operand of maximal rank. Other operands have smaller rank and are
341 ///   broadcast accordingly. It is assumed this broadcast is always legal,
342 ///   otherwise, it means one of the `customVectorizationHooks` is incorrect.
343 ///
344 /// This function assumes all operands of `op` have been vectorized and are in
345 /// the `bvm` mapping. As a consequence, this function is meant to be called on
346 /// a topologically-sorted list of ops.
347 /// This function does not update `bvm` but returns a VectorizationStatus that
348 /// instructs the caller what `bvm` update needs to occur.
349 static VectorizationResult
350 vectorizeOneOp(OpBuilder &b, LinalgOp linalgOp, Operation *op,
351                const BlockAndValueMapping &bvm,
352                ArrayRef<CustomVectorizationHook> customVectorizationHooks) {
353   LDBG("vectorize op " << *op);
354 
355   // 1. Try to apply any CustomVectorizationHook.
356   if (!customVectorizationHooks.empty()) {
357     for (auto &customFunc : customVectorizationHooks) {
358       VectorizationResult result = customFunc(op, bvm);
359       if (result.status == VectorizationStatus::Failure)
360         continue;
361       return result;
362     }
363   }
364 
365   // 2. Constant ops don't get vectorized but rather broadcasted at their users.
366   // Clone so that the constant is not confined to the linalgOp block .
367   if (isa<arith::ConstantOp, ConstantOp>(op))
368     return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)};
369 
370   // 3. Only ElementwiseMappable are allowed in the generic vectorization.
371   if (!OpTrait::hasElementwiseMappableTraits(op))
372     return VectorizationResult{VectorizationStatus::Failure, nullptr};
373 
374   // 4 . Check if the operation is a reduction.
375   SmallVector<std::pair<Value, Value>> reductionOperands;
376   for (Value operand : op->getOperands()) {
377     auto arg = operand.dyn_cast<BlockArgument>();
378     if (!arg || arg.getArgNumber() < linalgOp.getNumInputs())
379       continue;
380     SmallVector<Operation *> reductionOps;
381     Value reduceValue = matchReduction(
382         linalgOp.getRegionOutputArgs(),
383         arg.getArgNumber() - linalgOp.getNumInputs(), reductionOps);
384     if (!reduceValue)
385       continue;
386     reductionOperands.push_back(std::make_pair(reduceValue, operand));
387   }
388   if (!reductionOperands.empty()) {
389     assert(reductionOperands.size() == 1);
390     Operation *reduceOp =
391         reduceIfNeeded(b, linalgOp, op, reductionOperands[0].first,
392                        reductionOperands[0].second, bvm);
393     if (reduceOp)
394       return VectorizationResult{VectorizationStatus::NewOp, reduceOp};
395   }
396 
397   // 5. Generic vectorization path for ElementwiseMappable ops.
398   //   a. first get the first max ranked shape.
399   SmallVector<int64_t, 4> firstMaxRankedShape;
400   for (Value operand : op->getOperands()) {
401     auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>();
402     if (vt && firstMaxRankedShape.size() < vt.getShape().size())
403       firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end());
404   }
405   //   b. broadcast each op if needed.
406   auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) {
407     return firstMaxRankedShape.empty()
408                ? bvm.lookup(v)
409                : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape);
410   });
411   //   c. for elementwise, the result is the vector with the firstMaxRankedShape
412   auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) {
413     return firstMaxRankedShape.empty()
414                ? t
415                : VectorType::get(firstMaxRankedShape, t);
416   });
417 
418   // Build and return the new op.
419   return VectorizationResult{
420       VectorizationStatus::NewOp,
421       createVectorizedOp(b, op, llvm::to_vector<4>(vectorizedOperands),
422                          llvm::to_vector<4>(returnTypes))};
423 }
424 
425 /// Detect whether `r` has only ConstantOp, ElementwiseMappable and YieldOp.
426 static bool hasOnlyScalarElementwiseOp(Region &r) {
427   if (!llvm::hasSingleElement(r))
428     return false;
429   for (Operation &op : r.front()) {
430     if (!(isa<arith::ConstantOp, ConstantOp, linalg::YieldOp, linalg::IndexOp>(
431               op) ||
432           OpTrait::hasElementwiseMappableTraits(&op)) ||
433         llvm::any_of(op.getResultTypes(),
434                      [](Type type) { return !type.isIntOrIndexOrFloat(); }))
435       return false;
436   }
437   return true;
438 }
439 
440 // Return true if the op is an element-wise linalg op.
441 static bool isElementwise(Operation *op) {
442   auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
443   if (!linalgOp)
444     return false;
445   if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
446     return false;
447   // TODO: relax the restrictions on indexing map.
448   for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
449     if (!linalgOp.getTiedIndexingMap(opOperand).isIdentity())
450       return false;
451   }
452   return hasOnlyScalarElementwiseOp(linalgOp->getRegion(0));
453 }
454 
455 /// Generic vectorization function that rewrites the body of a `linalgOp` into
456 /// vector form. Generic vectorization proceeds as follows:
457 ///   1. Verify the `linalgOp` has one non-empty region.
458 ///   2. Values defined above the region are mapped to themselves and will be
459 ///   broadcasted on a per-need basis by their consumers.
460 ///   3. Each region argument is vectorized into a vector.transfer_read (or 0-d
461 ///   load).
462 ///   TODO: Reuse opportunities for RAR dependencies.
463 ///   4a. Register CustomVectorizationHook for YieldOp to capture the results.
464 ///   4b. Register CustomVectorizationHook for IndexOp to access the iteration
465 ///   indices.
466 ///   5. Iteratively call vectorizeOneOp on the region operations.
467 ///
468 /// When `broadcastToMaximalCommonShape` is set to true, eager broadcasting is
469 /// performed to the maximal common vector size implied by the `linalgOp`
470 /// iteration space. This eager broadcasting is introduced in the
471 /// permutation_map of the vector.transfer_read operations. The eager
472 /// broadcasting makes it trivial to detrmine where broadcast, transposes and
473 /// reductions should occur, without any bookkeeping. The tradeoff is that, in
474 /// the absence of good canonicalizations, the amount of work increases.
475 /// This is not deemed a problem as we expect canonicalizations and foldings to
476 /// aggressively clean up the useless work.
477 static LogicalResult
478 vectorizeAsLinalgGeneric(OpBuilder &b, LinalgOp linalgOp,
479                          SmallVectorImpl<Value> &newResults) {
480   Block *block = linalgOp.getBlock();
481 
482   // 2. Values defined above the region can only be broadcast for now. Make them
483   // map to themselves.
484   BlockAndValueMapping bvm;
485   SetVector<Value> valuesSet;
486   mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
487   bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef());
488 
489   if (linalgOp.getNumOutputs() == 0)
490     return failure();
491 
492   // TODO: the common vector shape is equal to the static loop sizes only when
493   // all indexing maps are projected permutations. For convs and stencils the
494   // logic will need to evolve.
495   SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes();
496 
497   // 3. Turn all BBArgs into vector.transfer_read / load.
498   Location loc = linalgOp.getLoc();
499   Value zero = b.create<arith::ConstantIndexOp>(loc, 0);
500   for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
501     BlockArgument bbarg = block->getArgument(opOperand->getOperandNumber());
502     if (linalgOp.isScalar(opOperand)) {
503       bvm.map(bbarg, opOperand->get());
504       continue;
505     }
506     VectorType readType;
507     AffineMap map;
508     // TODO: can we keep this simplification?
509     // if (linalgOp.getShape(opOperand).empty()) {
510     //   readType = VectorType::get({}, bbarg.getType());
511     // } else {
512     if (opOperand->getOperandNumber() < linalgOp.getNumInputs()) {
513       map = inverseAndBroadcastProjectedPermuation(
514           linalgOp.getTiedIndexingMap(opOperand));
515       readType = VectorType::get(commonVectorShape,
516                                  getElementTypeOrSelf(opOperand->get()));
517     } else {
518       map = inversePermutation(
519           reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand)));
520       readType = VectorType::get(map.compose(linalgOp.getShape(opOperand)),
521                                  getElementTypeOrSelf(opOperand->get()));
522     }
523     // }
524 
525     auto shape = linalgOp.getShape(opOperand);
526     SmallVector<Value> indices(shape.size(), zero);
527     Value readValue = b.create<vector::TransferReadOp>(
528         loc, readType, opOperand->get(), indices, map);
529     // Not all ops support 0-d vectors, extract the scalar for now.
530     // TODO: remove this.
531     if (readValue.getType().cast<VectorType>().getRank() == 0)
532       readValue = b.create<vector::ExtractElementOp>(loc, readValue);
533 
534     LDBG("new vectorized bbarg(" << bbarg.getArgNumber() << "): " << readValue);
535     bvm.map(bbarg, readValue);
536     bvm.map(opOperand->get(), readValue);
537   }
538 
539   SmallVector<CustomVectorizationHook> hooks;
540   // 4a. Register CustomVectorizationHook for yieldOp.
541   CustomVectorizationHook vectorizeYield =
542       [&](Operation *op,
543           const BlockAndValueMapping &bvm) -> VectorizationResult {
544     return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults);
545   };
546   hooks.push_back(vectorizeYield);
547 
548   // 4b. Register CustomVectorizationHook for indexOp.
549   CustomVectorizationHook vectorizeIndex =
550       [&](Operation *op,
551           const BlockAndValueMapping &bvm) -> VectorizationResult {
552     return vectorizeLinalgIndex(b, op, linalgOp);
553   };
554   hooks.push_back(vectorizeIndex);
555 
556   // 5. Iteratively call `vectorizeOneOp` to each op in the slice.
557   for (Operation &op : block->getOperations()) {
558     VectorizationResult result = vectorizeOneOp(b, linalgOp, &op, bvm, hooks);
559     if (result.status == VectorizationStatus::Failure) {
560       LDBG("failed to vectorize: " << op);
561       return failure();
562     }
563     if (result.status == VectorizationStatus::NewOp) {
564       LDBG("new vector op: " << *result.newOp;);
565       bvm.map(op.getResults(), result.newOp->getResults());
566     }
567   }
568 
569   return success();
570 }
571 
572 /// Helper function to vectorize a `linalgOp` with contraction semantics in a
573 /// generic fashion.
574 /// This helper is needed atm because the truly generic implementation requires
575 /// good vector.multi_reduce folding patterns that are currently NYI.
576 // TODO: drop reliance on a specific pattern.
577 static bool allIndexingsAreProjectedPermutation(LinalgOp op) {
578   return llvm::all_of(op.getIndexingMaps(), [](AffineMap m) {
579     return m.isProjectedPermutation(/*allowZeroInResults=*/true);
580   });
581 }
582 
583 // TODO: probably need some extra checks for reduction followed by consumer
584 // ops that may not commute (e.g. linear reduction + non-linear instructions).
585 static LogicalResult reductionPreconditions(LinalgOp op) {
586   if (llvm::none_of(op.iterator_types(), isReductionIterator)) {
587     LDBG("reduction precondition failed: no reduction iterator");
588     return failure();
589   }
590   for (OpOperand *opOperand : op.getOutputOperands()) {
591     Operation *reduceOp = matchLinalgReduction(opOperand);
592     if (!reduceOp || !getKindForOp(reduceOp)) {
593       LDBG("reduction precondition failed: reduction detection failed");
594       return failure();
595     }
596   }
597   return success();
598 }
599 
600 LogicalResult
601 mlir::linalg::vectorizeStaticLinalgOpPrecondition(linalg::LinalgOp op) {
602   if (isElementwise(op))
603     return success();
604   // TODO: isaConvolutionOpInterface that can also infer from generic features.
605   // But we will still need stride/dilation attributes that will be annoying to
606   // reverse-engineer...
607   if (isa<ConvolutionOpInterface>(op.getOperation()))
608     return success();
609   // TODO: the common vector shape is equal to the static loop sizes only when
610   // all indexing maps are projected permutations. For convs and stencils the
611   // logic will need to evolve.
612   if (!allIndexingsAreProjectedPermutation(op)) {
613     LDBG("precondition failed: not projected permutations");
614     return failure();
615   }
616   if (failed(reductionPreconditions(op))) {
617     LDBG("precondition failed: reduction preconditions");
618     return failure();
619   }
620   return success();
621 }
622 
623 LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
624   auto linalgOp = cast<linalg::LinalgOp>(op);
625   // All types must be static shape to go to vector.
626   if (linalgOp.hasDynamicShape()) {
627     LDBG("precondition failed: dynamic shape");
628     return failure();
629   }
630   return vectorizeStaticLinalgOpPrecondition(linalgOp);
631 }
632 
633 LogicalResult
634 mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
635                                 SmallVectorImpl<Value> &newResults) {
636   if (failed(vectorizeLinalgOpPrecondition(op)))
637     return failure();
638 
639   auto linalgOp = cast<LinalgOp>(op);
640 
641   // TODO: isaConvolutionOpInterface that can also infer from generic features.
642   // But we will still need stride/dilation attributes that will be annoying to
643   // reverse-engineer...
644   if (auto convOp = dyn_cast<ConvolutionOpInterface>(op)) {
645     FailureOr<Operation *> resultOrFail = vectorizeConvolution(b, convOp);
646     if (failed(resultOrFail))
647       return failure();
648     Operation *newOp = *resultOrFail;
649     llvm::append_range(newResults, newOp->getResults());
650     return success();
651   }
652 
653   LDBG(""
654        << "Vectorize linalg op as a generic by broadcasting to "
655           "maximal common shape: "
656        << *op);
657   return vectorizeAsLinalgGeneric(b, linalgOp, newResults);
658 }
659 
660 //----------------------------------------------------------------------------//
661 // Misc. vectorization patterns.
662 //----------------------------------------------------------------------------//
663 
664 /// Helper function that retrieves the value of an IntegerAttr.
665 static int64_t getIntFromAttr(Attribute attr) {
666   return attr.cast<IntegerAttr>().getInt();
667 }
668 
669 /// Given an ArrayRef of OpFoldResults, return a vector of Values. IntegerAttrs
670 /// are converted to ConstantIndexOps. Other attribute types are not supported.
671 static SmallVector<Value> ofrToIndexValues(OpBuilder &builder, Location loc,
672                                            ArrayRef<OpFoldResult> ofrs) {
673   SmallVector<Value> result;
674   llvm::for_each(ofrs, [&](auto o) {
675     if (auto val = o.template dyn_cast<Value>()) {
676       result.push_back(val);
677     } else {
678       result.push_back(builder.create<arith::ConstantIndexOp>(
679           loc, getIntFromAttr(o.template get<Attribute>())));
680     }
681   });
682   return result;
683 }
684 
685 /// Rewrite a PadTensorOp into a sequence of InitTensorOp, FillOp and
686 /// InsertSliceOp. For now, only constant padding values are supported.
687 /// If there is enough static type information, TransferReadOps and
688 /// TransferWriteOps may be generated instead of InsertSliceOps.
689 struct GenericPadTensorOpVectorizationPattern
690     : public GeneralizePadTensorOpPattern {
691   GenericPadTensorOpVectorizationPattern(MLIRContext *context,
692                                          PatternBenefit benefit = 1)
693       : GeneralizePadTensorOpPattern(context, tryVectorizeCopy, benefit) {}
694   /// Vectorize the copying of a PadTensorOp's source. This is possible if each
695   /// dimension size is statically know in the source type or the result type
696   /// (or both).
697   static LogicalResult tryVectorizeCopy(PatternRewriter &rewriter,
698                                         PadTensorOp padOp, Value dest) {
699     auto sourceType = padOp.getSourceType();
700     auto resultType = padOp.getResultType();
701 
702     // Copy cannot be vectorized if pad value is non-constant and source shape
703     // is dynamic. In case of a dynamic source shape, padding must be appended
704     // by TransferReadOp, but TransferReadOp supports only constant padding.
705     auto padValue = padOp.getConstantPaddingValue();
706     if (!padValue) {
707       if (!sourceType.hasStaticShape())
708         return failure();
709       // Create dummy padding value.
710       auto elemType = sourceType.getElementType();
711       padValue = rewriter.create<arith::ConstantOp>(
712           padOp.getLoc(), elemType, rewriter.getZeroAttr(elemType));
713     }
714 
715     SmallVector<int64_t> vecShape;
716     SmallVector<bool> readInBounds;
717     SmallVector<bool> writeInBounds;
718     for (unsigned i = 0; i < sourceType.getRank(); ++i) {
719       if (!sourceType.isDynamicDim(i)) {
720         vecShape.push_back(sourceType.getDimSize(i));
721         // Source shape is statically known: Neither read nor write are out-of-
722         // bounds.
723         readInBounds.push_back(true);
724         writeInBounds.push_back(true);
725       } else if (!resultType.isDynamicDim(i)) {
726         // Source shape is not statically known, but result shape is. Vectorize
727         // with size of result shape. This may be larger than the source size.
728         vecShape.push_back(resultType.getDimSize(i));
729         // Read may be out-of-bounds because the result size could be larger
730         // than the source size.
731         readInBounds.push_back(false);
732         // Write is out-of-bounds if low padding > 0.
733         writeInBounds.push_back(
734             getConstantIntValue(padOp.getMixedLowPad()[i]) ==
735             static_cast<int64_t>(0));
736       } else {
737         // Neither source nor result dim of padOp is static. Cannot vectorize
738         // the copy.
739         return failure();
740       }
741     }
742     auto vecType = VectorType::get(vecShape, sourceType.getElementType());
743 
744     // Generate TransferReadOp.
745     SmallVector<Value> readIndices(
746         vecType.getRank(),
747         rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
748     auto read = rewriter.create<vector::TransferReadOp>(
749         padOp.getLoc(), vecType, padOp.source(), readIndices, padValue,
750         ArrayRef<bool>{readInBounds});
751 
752     // If `dest` is a FillOp and the TransferWriteOp would overwrite the entire
753     // tensor, write directly to the FillOp's operand.
754     if (llvm::equal(vecShape, resultType.getShape()) &&
755         llvm::all_of(writeInBounds, [](bool b) { return b; }))
756       if (auto fill = dest.getDefiningOp<FillOp>())
757         dest = fill.output();
758 
759     // Generate TransferWriteOp.
760     auto writeIndices =
761         ofrToIndexValues(rewriter, padOp.getLoc(), padOp.getMixedLowPad());
762     rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
763         padOp, read, dest, writeIndices, ArrayRef<bool>{writeInBounds});
764 
765     return success();
766   }
767 };
768 
769 /// Base pattern for rewriting PadTensorOps whose result is consumed by a given
770 /// operation type OpTy.
771 template <typename OpTy>
772 struct VectorizePadTensorOpUserPattern : public OpRewritePattern<PadTensorOp> {
773   using OpRewritePattern<PadTensorOp>::OpRewritePattern;
774 
775   LogicalResult matchAndRewrite(PadTensorOp padOp,
776                                 PatternRewriter &rewriter) const final {
777     bool changed = false;
778     // Insert users in vector, because some users may be replaced/removed.
779     for (auto *user : llvm::to_vector<4>(padOp->getUsers()))
780       if (auto op = dyn_cast<OpTy>(user))
781         changed |= rewriteUser(rewriter, padOp, op).succeeded();
782     return success(changed);
783   }
784 
785 protected:
786   virtual LogicalResult rewriteUser(PatternRewriter &rewriter,
787                                     PadTensorOp padOp, OpTy op) const = 0;
788 };
789 
790 /// Rewrite use of PadTensorOp result in TransferReadOp. E.g.:
791 /// ```
792 /// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
793 /// %r = vector.transfer_read %0[%c0, %c0], %cst
794 ///     {in_bounds = [true, true]} : tensor<17x5xf32>, vector<17x5xf32>
795 /// ```
796 /// is rewritten to:
797 /// ```
798 /// %r = vector.transfer_read %src[%c0, %c0], %padding
799 ///     {in_bounds = [true, true]}
800 ///     : tensor<?x?xf32>, vector<17x5xf32>
801 /// ```
802 /// Note: By restricting this pattern to in-bounds TransferReadOps, we can be
803 /// sure that the original padding value %cst was never used.
804 ///
805 /// This rewrite is possible if:
806 /// - `xferOp` has no out-of-bounds dims or mask.
807 /// - Low padding is static 0.
808 /// - Single, scalar padding value.
809 struct PadTensorOpVectorizationWithTransferReadPattern
810     : public VectorizePadTensorOpUserPattern<vector::TransferReadOp> {
811   using VectorizePadTensorOpUserPattern<
812       vector::TransferReadOp>::VectorizePadTensorOpUserPattern;
813 
814   LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
815                             vector::TransferReadOp xferOp) const override {
816     // Low padding must be static 0.
817     if (!padOp.hasZeroLowPad())
818       return failure();
819     // Pad value must be a constant.
820     auto padValue = padOp.getConstantPaddingValue();
821     if (!padValue)
822       return failure();
823     // Padding value of existing `xferOp` is unused.
824     if (xferOp.hasOutOfBoundsDim() || xferOp.mask())
825       return failure();
826 
827     rewriter.updateRootInPlace(xferOp, [&]() {
828       SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
829       xferOp->setAttr(xferOp.getInBoundsAttrName(),
830                       rewriter.getBoolArrayAttr(inBounds));
831       xferOp.sourceMutable().assign(padOp.source());
832       xferOp.paddingMutable().assign(padValue);
833     });
834 
835     return success();
836   }
837 };
838 
839 /// Rewrite use of PadTensorOp result in TransferWriteOp.
840 /// This pattern rewrites TransferWriteOps that write to a padded tensor value,
841 /// where the same amount of padding is immediately removed again after the
842 /// write. In such cases, the TransferWriteOp can write to the non-padded tensor
843 /// value and apply out-of-bounds masking. E.g.:
844 /// ```
845 /// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
846 ///     : tensor<...> to tensor<?x?xf32>
847 /// %1 = linalg.pad_tensor %0 ... : tensor<?x?xf32> to tensor<17x5xf32>
848 /// %2 = vector.transfer_write %vec, %1[...]
849 ///     : vector<17x5xf32>, tensor<17x5xf32>
850 /// %r = tensor.extract_slice %2[0, 0] [%s0, %s1] [1, 1]
851 ///     : tensor<17x5xf32> to tensor<?x?xf32>
852 /// ```
853 /// is rewritten to:
854 /// ```
855 /// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
856 ///     : tensor<...> to tensor<?x?xf32>
857 /// %r = vector.transfer_write %vec, %0[...] : vector<17x5xf32>, tensor<?x?xf32>
858 /// ```
859 /// Note: It is important that the ExtractSliceOp %r resizes the result of the
860 /// TransferWriteOp to the same size as the input of the TensorPadOp (or an even
861 /// smaller size). Otherwise, %r's new (dynamic) dimensions would differ from
862 /// %r's old dimensions.
863 ///
864 /// This rewrite is possible if:
865 /// - Low padding is static 0.
866 /// - `xferOp` has exactly one use, which is an ExtractSliceOp. This
867 ///   ExtractSliceOp trims the same amount of padding that was added beforehand.
868 /// - Single, scalar padding value.
869 struct PadTensorOpVectorizationWithTransferWritePattern
870     : public VectorizePadTensorOpUserPattern<vector::TransferWriteOp> {
871   using VectorizePadTensorOpUserPattern<
872       vector::TransferWriteOp>::VectorizePadTensorOpUserPattern;
873 
874   LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
875                             vector::TransferWriteOp xferOp) const override {
876     // TODO: support 0-d corner case.
877     if (xferOp.getTransferRank() == 0)
878       return failure();
879 
880     // Low padding must be static 0.
881     if (!padOp.hasZeroLowPad())
882       return failure();
883     // Pad value must be a constant.
884     auto padValue = padOp.getConstantPaddingValue();
885     if (!padValue)
886       return failure();
887     // TransferWriteOp result must be directly consumed by an ExtractSliceOp.
888     if (!xferOp->hasOneUse())
889       return failure();
890     auto trimPadding = dyn_cast<tensor::ExtractSliceOp>(*xferOp->user_begin());
891     if (!trimPadding)
892       return failure();
893     // Only static zero offsets supported when trimming padding.
894     if (!trimPadding.hasZeroOffset())
895       return failure();
896     // trimPadding must remove the amount of padding that was added earlier.
897     if (!hasSameTensorSize(padOp.source(), trimPadding))
898       return failure();
899 
900     // Insert the new TransferWriteOp at position of the old TransferWriteOp.
901     rewriter.setInsertionPoint(xferOp);
902 
903     SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
904     auto newXferOp = rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
905         xferOp, padOp.source().getType(), xferOp.vector(), padOp.source(),
906         xferOp.indices(), xferOp.permutation_mapAttr(), xferOp.mask(),
907         rewriter.getBoolArrayAttr(inBounds));
908     rewriter.replaceOp(trimPadding, newXferOp->getResult(0));
909 
910     return success();
911   }
912 
913   /// Check if `beforePadding` and `afterTrimming` have the same tensor size,
914   /// i.e., same dimensions.
915   ///
916   /// Dimensions may be static, dynamic or mix of both. In case of dynamic
917   /// dimensions, this function tries to infer the (static) tensor size by
918   /// looking at the defining op and utilizing op-specific knowledge.
919   ///
920   /// This is a conservative analysis. In case equal tensor sizes cannot be
921   /// proven statically, this analysis returns `false` even though the tensor
922   /// sizes may turn out to be equal at runtime.
923   bool hasSameTensorSize(Value beforePadding,
924                          tensor::ExtractSliceOp afterTrimming) const {
925     // If the input to PadTensorOp is a CastOp, try with with both CastOp result
926     // and CastOp operand.
927     if (auto castOp = beforePadding.getDefiningOp<tensor::CastOp>())
928       if (hasSameTensorSize(castOp.source(), afterTrimming))
929         return true;
930 
931     auto t1 = beforePadding.getType().dyn_cast<RankedTensorType>();
932     auto t2 = afterTrimming.getType().dyn_cast<RankedTensorType>();
933     // Only RankedTensorType supported.
934     if (!t1 || !t2)
935       return false;
936     // Rank of both values must be the same.
937     if (t1.getRank() != t2.getRank())
938       return false;
939 
940     // All static dimensions must be the same. Mixed cases (e.g., dimension
941     // static in `t1` but dynamic in `t2`) are not supported.
942     for (unsigned i = 0; i < t1.getRank(); ++i) {
943       if (t1.isDynamicDim(i) != t2.isDynamicDim(i))
944         return false;
945       if (!t1.isDynamicDim(i) && t1.getDimSize(i) != t2.getDimSize(i))
946         return false;
947     }
948 
949     // Nothing more to check if all dimensions are static.
950     if (t1.getNumDynamicDims() == 0)
951       return true;
952 
953     // All dynamic sizes must be the same. The only supported case at the moment
954     // is when `beforePadding` is an ExtractSliceOp (or a cast thereof).
955 
956     // Apart from CastOp, only ExtractSliceOp is supported.
957     auto beforeSlice = beforePadding.getDefiningOp<tensor::ExtractSliceOp>();
958     if (!beforeSlice)
959       return false;
960 
961     assert(static_cast<size_t>(t1.getRank()) ==
962            beforeSlice.getMixedSizes().size());
963     assert(static_cast<size_t>(t2.getRank()) ==
964            afterTrimming.getMixedSizes().size());
965 
966     for (unsigned i = 0; i < t1.getRank(); ++i) {
967       // Skip static dimensions.
968       if (!t1.isDynamicDim(i))
969         continue;
970       auto size1 = beforeSlice.getMixedSizes()[i];
971       auto size2 = afterTrimming.getMixedSizes()[i];
972 
973       // Case 1: Same value or same constant int.
974       if (isEqualConstantIntOrValue(size1, size2))
975         continue;
976 
977       // Other cases: Take a deeper look at defining ops of values.
978       auto v1 = size1.dyn_cast<Value>();
979       auto v2 = size2.dyn_cast<Value>();
980       if (!v1 || !v2)
981         return false;
982 
983       // Case 2: Both values are identical AffineMinOps. (Should not happen if
984       // CSE is run.)
985       auto minOp1 = v1.getDefiningOp<AffineMinOp>();
986       auto minOp2 = v2.getDefiningOp<AffineMinOp>();
987       if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() &&
988           minOp1.operands() == minOp2.operands())
989         continue;
990 
991       // Add additional cases as needed.
992     }
993 
994     // All tests passed.
995     return true;
996   }
997 };
998 
999 /// Rewrite use of PadTensorOp result in InsertSliceOp. E.g.:
1000 /// ```
1001 /// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
1002 /// %r = tensor.insert_slice %0
1003 ///     into %dest[%a, %b, 0, 0] [1, 1, 17, 5] [1, 1, 1, 1]
1004 ///     : tensor<17x5xf32> into tensor<?x?x17x5xf32>
1005 /// ```
1006 /// is rewritten to:
1007 /// ```
1008 /// %0 = vector.transfer_read %src[%c0, %c0], %padding
1009 ///     : tensor<?x?xf32>, vector<17x5xf32>
1010 /// %r = vector.transfer_write %0, %dest[%a, %b, %c0, %c0]
1011 ///     {in_bounds = [true, true]} : vector<17x5xf32>, tensor<?x?x17x5xf32>
1012 /// ```
1013 ///
1014 /// This rewrite is possible if:
1015 /// - Low padding is static 0.
1016 /// - `padOp` result shape is static.
1017 /// - The entire padded tensor is inserted.
1018 ///   (Implies that sizes of `insertOp` are all static.)
1019 /// - Only unit strides in `insertOp`.
1020 /// - Single, scalar padding value.
1021 /// - `padOp` result not used as destination.
1022 struct PadTensorOpVectorizationWithInsertSlicePattern
1023     : public VectorizePadTensorOpUserPattern<tensor::InsertSliceOp> {
1024   using VectorizePadTensorOpUserPattern<
1025       tensor::InsertSliceOp>::VectorizePadTensorOpUserPattern;
1026 
1027   LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
1028                             tensor::InsertSliceOp insertOp) const override {
1029     // Low padding must be static 0.
1030     if (!padOp.hasZeroLowPad())
1031       return failure();
1032     // Only unit stride supported.
1033     if (!insertOp.hasUnitStride())
1034       return failure();
1035     // Pad value must be a constant.
1036     auto padValue = padOp.getConstantPaddingValue();
1037     if (!padValue)
1038       return failure();
1039     // Dynamic shapes not supported.
1040     if (!padOp.result().getType().cast<ShapedType>().hasStaticShape())
1041       return failure();
1042     // Pad result not used as destination.
1043     if (insertOp.dest() == padOp.result())
1044       return failure();
1045 
1046     auto vecType = VectorType::get(padOp.getType().getShape(),
1047                                    padOp.getType().getElementType());
1048     unsigned vecRank = vecType.getRank();
1049     unsigned tensorRank = insertOp.getType().getRank();
1050 
1051     // Check if sizes match: Insert the entire tensor into most minor dims.
1052     // (No permutations allowed.)
1053     SmallVector<int64_t> expectedSizes(tensorRank - vecRank, 1);
1054     expectedSizes.append(vecType.getShape().begin(), vecType.getShape().end());
1055     if (!llvm::all_of(
1056             llvm::zip(insertOp.getMixedSizes(), expectedSizes), [](auto it) {
1057               return getConstantIntValue(std::get<0>(it)) == std::get<1>(it);
1058             }))
1059       return failure();
1060 
1061     // Insert the TransferReadOp and TransferWriteOp at the position of the
1062     // InsertSliceOp.
1063     rewriter.setInsertionPoint(insertOp);
1064 
1065     // Generate TransferReadOp: Read entire source tensor and add high padding.
1066     SmallVector<Value> readIndices(
1067         vecRank, rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
1068     auto read = rewriter.create<vector::TransferReadOp>(
1069         padOp.getLoc(), vecType, padOp.source(), readIndices, padValue);
1070 
1071     // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at
1072     // specified offsets. Write is fully in-bounds because a InsertSliceOp's
1073     // source must fit into the destination at the specified offsets.
1074     auto writeIndices =
1075         ofrToIndexValues(rewriter, padOp.getLoc(), insertOp.getMixedOffsets());
1076     SmallVector<bool> inBounds(vecRank, true);
1077     rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
1078         insertOp, read, insertOp.dest(), writeIndices,
1079         ArrayRef<bool>{inBounds});
1080 
1081     return success();
1082   }
1083 };
1084 
1085 void mlir::linalg::populatePadTensorOpVectorizationPatterns(
1086     RewritePatternSet &patterns, PatternBenefit baseBenefit) {
1087   patterns.add<GenericPadTensorOpVectorizationPattern>(patterns.getContext(),
1088                                                        baseBenefit);
1089   // Try these specialized patterns first before resorting to the generic one.
1090   patterns.add<PadTensorOpVectorizationWithTransferReadPattern,
1091                PadTensorOpVectorizationWithTransferWritePattern,
1092                PadTensorOpVectorizationWithInsertSlicePattern>(
1093       patterns.getContext(), baseBenefit.getBenefit() + 1);
1094 }
1095 
1096 // TODO: cleanup all the convolution vectorization patterns.
1097 template <class ConvOp, int N>
1098 LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
1099     ConvOp op, PatternRewriter &rewriter) const {
1100   Location loc = op.getLoc();
1101   MLIRContext *context = op.getContext();
1102 
1103   OpOperand *input = op.getInputOperand(0);
1104   OpOperand *kernel = op.getInputOperand(1);
1105   OpOperand *output = op.getOutputOperand(0);
1106   ArrayRef<int64_t> inShape = op.getShape(input);
1107   ArrayRef<int64_t> kShape = op.getShape(kernel);
1108 
1109   if (llvm::any_of(inShape, ShapedType::isDynamic) ||
1110       llvm::any_of(kShape, ShapedType::isDynamic))
1111     return failure();
1112 
1113   SmallVector<AffineExpr, 4> mapping;
1114   SmallVector<int64_t, 4> vectorDims;
1115   // Fail to apply when the size of not vectorized dimension is not 1.
1116   for (unsigned i = 0; i < N; i++) {
1117     if (!mask[i] && (inShape[i] != 1 || kShape[i] != 1))
1118       return failure();
1119 
1120     if (mask[i] && inShape[i] != kShape[i])
1121       return failure();
1122 
1123     if (mask[i]) {
1124       mapping.push_back(getAffineDimExpr(i, context));
1125       vectorDims.push_back(inShape[i]);
1126     }
1127   }
1128 
1129   int64_t rank = op.getRank(input);
1130   int64_t numDims = mapping.size();
1131   Type elemType = getElementTypeOrSelf(input->get());
1132 
1133   auto map = AffineMap::get(rank, 0, mapping, context);
1134   SmallVector<Value, 4> zeros(rank,
1135                               rewriter.create<arith::ConstantIndexOp>(loc, 0));
1136   auto vecType = VectorType::get(vectorDims, elemType);
1137 
1138   auto inputVec = rewriter.create<vector::TransferReadOp>(
1139       loc, vecType, input->get(), zeros, map);
1140   auto kernelVec = rewriter.create<vector::TransferReadOp>(
1141       loc, vecType, kernel->get(), zeros, map);
1142 
1143   auto acc = rewriter.create<arith::ConstantOp>(loc, elemType,
1144                                                 rewriter.getZeroAttr(elemType));
1145 
1146   std::array<AffineMap, 3> indexingMaps{
1147       AffineMap::getMultiDimIdentityMap(numDims, context),
1148       AffineMap::getMultiDimIdentityMap(numDims, context),
1149       AffineMap::get(numDims, 0, {}, context)};
1150 
1151   std::vector<StringRef> iteratorTypes(numDims, "reduction");
1152 
1153   auto result = rewriter.create<vector::ContractionOp>(
1154       loc, inputVec, kernelVec, acc,
1155       rewriter.getAffineMapArrayAttr(indexingMaps),
1156       rewriter.getStrArrayAttr(iteratorTypes));
1157 
1158   rewriter.create<memref::StoreOp>(loc, result, output->get(),
1159                                    ValueRange(zeros));
1160   rewriter.eraseOp(op);
1161   return success();
1162 }
1163 
1164 /// Inserts tiling, promotion and vectorization pattern for ConvOp
1165 /// conversion into corresponding pattern lists.
1166 template <typename ConvOp, unsigned N>
1167 static void populateVectorizationPatterns(
1168     RewritePatternSet &tilingPatterns, RewritePatternSet &promotionPatterns,
1169     RewritePatternSet &vectorizationPatterns, ArrayRef<int64_t> tileSizes) {
1170   auto *context = tilingPatterns.getContext();
1171   if (tileSizes.size() < N)
1172     return;
1173 
1174   constexpr static StringRef kTiledMarker = "TILED";
1175   constexpr static StringRef kPromotedMarker = "PROMOTED";
1176   tilingPatterns.add<LinalgTilingPattern<ConvOp>>(
1177       context, LinalgTilingOptions().setTileSizes(tileSizes),
1178       LinalgTransformationFilter(ArrayRef<StringAttr>{},
1179                                  StringAttr::get(kTiledMarker, context)));
1180 
1181   promotionPatterns.add<LinalgPromotionPattern<ConvOp>>(
1182       context, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true),
1183       LinalgTransformationFilter(StringAttr::get(kTiledMarker, context),
1184                                  StringAttr::get(kPromotedMarker, context)));
1185 
1186   SmallVector<bool, 4> mask(N);
1187   int offset = tileSizes.size() - N;
1188   std::transform(tileSizes.begin() + offset, tileSizes.end(), mask.begin(),
1189                  [](int64_t i) -> bool { return i > 1; });
1190 
1191   vectorizationPatterns.add<ConvOpVectorization<ConvOp, N>>(context, mask);
1192 }
1193 
1194 void mlir::linalg::populateConvVectorizationPatterns(
1195     MLIRContext *context, SmallVectorImpl<RewritePatternSet> &patterns,
1196     ArrayRef<int64_t> tileSizes) {
1197   RewritePatternSet tiling(context);
1198   RewritePatternSet promotion(context);
1199   RewritePatternSet vectorization(context);
1200   populateVectorizationPatterns<Conv1DOp, 1>(tiling, promotion, vectorization,
1201                                              tileSizes);
1202 
1203   populateVectorizationPatterns<Conv2DOp, 2>(tiling, promotion, vectorization,
1204                                              tileSizes);
1205 
1206   populateVectorizationPatterns<Conv3DOp, 3>(tiling, promotion, vectorization,
1207                                              tileSizes);
1208 
1209   populateVectorizationPatterns<Conv1DNwcWcfOp, 3>(tiling, promotion,
1210                                                    vectorization, tileSizes);
1211 
1212   populateVectorizationPatterns<Conv2DNhwcHwcfOp, 4>(tiling, promotion,
1213                                                      vectorization, tileSizes);
1214 
1215   populateVectorizationPatterns<Conv3DNdhwcDhwcfOp, 5>(
1216       tiling, promotion, vectorization, tileSizes);
1217 
1218   patterns.push_back(std::move(tiling));
1219   patterns.push_back(std::move(promotion));
1220   patterns.push_back(std::move(vectorization));
1221 }
1222 
1223 //----------------------------------------------------------------------------//
1224 // Forwarding patterns
1225 //----------------------------------------------------------------------------//
1226 
1227 /// Check whether there is any interleaved use of any `values` between `firstOp`
1228 /// and `secondOp`. Conservatively return `true` if any op or value is in a
1229 /// different block.
1230 static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp,
1231                                     ValueRange values) {
1232   if (firstOp->getBlock() != secondOp->getBlock() ||
1233       !firstOp->isBeforeInBlock(secondOp)) {
1234     LDBG("interleavedUses precondition failed, firstOp: "
1235          << *firstOp << ", second op: " << *secondOp);
1236     return true;
1237   }
1238   for (auto v : values) {
1239     for (auto &u : v.getUses()) {
1240       Operation *owner = u.getOwner();
1241       if (owner == firstOp || owner == secondOp)
1242         continue;
1243       // TODO: this is too conservative, use dominance info in the future.
1244       if (owner->getBlock() == firstOp->getBlock() &&
1245           (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner)))
1246         continue;
1247       LDBG(" found interleaved op " << *owner << ", firstOp: " << *firstOp
1248                                     << ", second op: " << *secondOp);
1249       return true;
1250     }
1251   }
1252   return false;
1253 }
1254 
1255 /// Return the unique subview use of `v` if it is indeed unique, null otherwise.
1256 static memref::SubViewOp getSubViewUseIfUnique(Value v) {
1257   memref::SubViewOp subViewOp;
1258   for (auto &u : v.getUses()) {
1259     if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) {
1260       if (subViewOp)
1261         return memref::SubViewOp();
1262       subViewOp = newSubViewOp;
1263     }
1264   }
1265   return subViewOp;
1266 }
1267 
1268 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
1269 /// when available.
1270 LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
1271     vector::TransferReadOp xferOp, PatternRewriter &rewriter) const {
1272 
1273   // TODO: support mask.
1274   if (xferOp.mask())
1275     return failure();
1276 
1277   // Transfer into `view`.
1278   Value viewOrAlloc = xferOp.source();
1279   if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
1280       !viewOrAlloc.getDefiningOp<memref::AllocOp>())
1281     return failure();
1282 
1283   LDBG(viewOrAlloc);
1284 
1285   // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
1286   memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
1287   if (!subViewOp)
1288     return failure();
1289   Value subView = subViewOp.getResult();
1290   LDBG("with subView " << subView);
1291 
1292   // Find the copy into `subView` without interleaved uses.
1293   CopyOp copyOp;
1294   for (auto &u : subView.getUses()) {
1295     if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
1296       assert(newCopyOp.output().getType().isa<MemRefType>());
1297       if (newCopyOp.output() != subView)
1298         continue;
1299       LDBG("copy candidate " << *newCopyOp);
1300       if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView}))
1301         continue;
1302       copyOp = newCopyOp;
1303       break;
1304     }
1305   }
1306   if (!copyOp)
1307     return failure();
1308   LDBG("with copy " << *copyOp);
1309 
1310   // Find the fill into `viewOrAlloc` without interleaved uses before the copy.
1311   FillOp maybeFillOp;
1312   for (auto &u : viewOrAlloc.getUses()) {
1313     if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) {
1314       assert(newFillOp.output().getType().isa<MemRefType>());
1315       if (newFillOp.output() != viewOrAlloc)
1316         continue;
1317       LDBG("fill candidate " << *newFillOp);
1318       if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView}))
1319         continue;
1320       maybeFillOp = newFillOp;
1321       break;
1322     }
1323   }
1324   // Ensure padding matches.
1325   if (maybeFillOp && xferOp.padding() != maybeFillOp.value())
1326     return failure();
1327   if (maybeFillOp)
1328     LDBG("with maybeFillOp " << *maybeFillOp);
1329 
1330   // `in` is the subview that linalg.copy reads. Replace it.
1331   Value in = copyOp.input();
1332 
1333   // linalg.copy + linalg.fill can be used to create a padded local buffer.
1334   // The `masked` attribute is only valid on this padded buffer.
1335   // When forwarding to vector.transfer_read, the attribute must be reset
1336   // conservatively.
1337   Value res = rewriter.create<vector::TransferReadOp>(
1338       xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(),
1339       xferOp.permutation_mapAttr(), xferOp.padding(), xferOp.mask(),
1340       // in_bounds is explicitly reset
1341       /*inBoundsAttr=*/ArrayAttr());
1342 
1343   if (maybeFillOp)
1344     rewriter.eraseOp(maybeFillOp);
1345   rewriter.eraseOp(copyOp);
1346   rewriter.replaceOp(xferOp, res);
1347 
1348   return success();
1349 }
1350 
1351 /// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
1352 /// when available.
1353 LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
1354     vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const {
1355   // TODO: support mask.
1356   if (xferOp.mask())
1357     return failure();
1358 
1359   // Transfer into `viewOrAlloc`.
1360   Value viewOrAlloc = xferOp.source();
1361   if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
1362       !viewOrAlloc.getDefiningOp<memref::AllocOp>())
1363     return failure();
1364 
1365   // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
1366   memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
1367   if (!subViewOp)
1368     return failure();
1369   Value subView = subViewOp.getResult();
1370 
1371   // Find the copy from `subView` without interleaved uses.
1372   CopyOp copyOp;
1373   for (auto &u : subViewOp.getResult().getUses()) {
1374     if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
1375       if (newCopyOp.getInputOperand(0)->get() != subView)
1376         continue;
1377       if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView}))
1378         continue;
1379       copyOp = newCopyOp;
1380       break;
1381     }
1382   }
1383   if (!copyOp)
1384     return failure();
1385 
1386   // `out` is the subview copied into that we replace.
1387   assert(copyOp.output().getType().isa<MemRefType>());
1388   Value out = copyOp.output();
1389 
1390   // Forward vector.transfer into copy.
1391   // linalg.copy + linalg.fill can be used to create a padded local buffer.
1392   // The `masked` attribute is only valid on this padded buffer.
1393   // When forwarding to vector.transfer_write, the attribute must be reset
1394   // conservatively.
1395   rewriter.create<vector::TransferWriteOp>(
1396       xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(),
1397       xferOp.permutation_mapAttr(), xferOp.mask(),
1398       // in_bounds is explicitly reset
1399       /*inBoundsAttr=*/ArrayAttr());
1400 
1401   rewriter.eraseOp(copyOp);
1402   rewriter.eraseOp(xferOp);
1403 
1404   return success();
1405 }
1406 
1407 //===----------------------------------------------------------------------===//
1408 // Convolution vectorization patterns
1409 //===----------------------------------------------------------------------===//
1410 namespace {
1411 /// Generate a vector implementation for either:
1412 /// ```
1413 ///   Op def: (     n,     w,     c,    kw,    f  )
1414 ///    Iters: ({Par(), Par(), Par(), Red(), Red()})
1415 ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
1416 /// ```
1417 /// kw is unrolled, w is unrolled iff dilationW > 1.
1418 ///
1419 /// or
1420 ///
1421 /// ```
1422 ///   Op def: (     n,     w,     c,    kw )
1423 ///    Iters: ({Par(), Par(), Par(), Red()})
1424 ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
1425 /// ```
1426 /// kw is unrolled, w is unrolled iff dilationW > 1.
1427 struct Conv1DNwcGenerator : public StructuredGenerator<LinalgOp> {
1428   Conv1DNwcGenerator(OpBuilder &builder, LinalgOp linalgOp, int strideW,
1429                      int dilationW)
1430       : StructuredGenerator<LinalgOp>(builder, linalgOp), valid(false),
1431         strideW(strideW), dilationW(dilationW) {
1432     // Determine whether `linalgOp` can be generated with this generator
1433     if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
1434       return;
1435     lhsShaped = linalgOp.inputs()[0];
1436     rhsShaped = linalgOp.inputs()[1];
1437     resShaped = linalgOp.outputs()[0];
1438     lhsShapedType = lhsShaped.getType().dyn_cast<ShapedType>();
1439     rhsShapedType = rhsShaped.getType().dyn_cast<ShapedType>();
1440     resShapedType = resShaped.getType().dyn_cast<ShapedType>();
1441     if (!lhsShapedType || !rhsShapedType || !resShapedType)
1442       return;
1443     if (lhsShapedType.getRank() != 3 ||
1444         (rhsShapedType.getRank() != 2 && rhsShapedType.getRank() != 3) ||
1445         resShapedType.getRank() != 3)
1446       return;
1447 
1448     // Check for reduction `add` preceded by `mul`.
1449     Operation *reduceOp = matchLinalgReduction(linalgOp.getOutputOperand(0));
1450     if (!reduceOp)
1451       return;
1452     llvm::Optional<vector::CombiningKind> maybeKind;
1453     maybeKind = getKindForOp(reduceOp);
1454     if (!maybeKind || *maybeKind != vector::CombiningKind::ADD)
1455       return;
1456     maybeKind = getKindForOp(&(linalgOp->getRegion(0).front().front()));
1457     if (!maybeKind || *maybeKind != vector::CombiningKind::MUL)
1458       return;
1459 
1460     // The op is now known to be valid.
1461     valid = true;
1462   }
1463 
1464   /// Generate a vector implementation for:
1465   /// ```
1466   ///   Op def: (     n,     w,     c,    kw,    f  )
1467   ///    Iters: ({Par(), Par(), Par(), Red(), Red()})
1468   ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
1469   /// ```
1470   /// kw is always unrolled.
1471   /// TODO: w (resp. kw) is unrolled when the strideW ( resp. dilationW) is > 1.
1472   FailureOr<Operation *> conv() {
1473     if (!valid)
1474       return failure();
1475 
1476     int nSize = lhsShapedType.getShape()[0];
1477     int wSize = resShapedType.getShape()[1];
1478     int cSize = lhsShapedType.getShape()[2];
1479     int kwSize = rhsShapedType.getShape()[0];
1480     int fSize = rhsShapedType.getShape()[2];
1481 
1482     vector::TransferWriteOp write;
1483     Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
1484 
1485     // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
1486     // When strideW == 1, we can batch the contiguous loads and avoid unrolling
1487     int64_t wSizeStep = strideW == 1 ? wSize : 1;
1488 
1489     Type lhsEltType = lhsShapedType.getElementType();
1490     Type rhsEltType = rhsShapedType.getElementType();
1491     Type resEltType = resShapedType.getElementType();
1492     VectorType lhsType = VectorType::get(
1493         {nSize,
1494          // iw = ow * sw + kw *  dw - 1
1495          //   (i.e. 16 convolved with 3 (@stride 1 dilation 1) -> 14)
1496          // Perform the proper inclusive -> exclusive -> inclusive.
1497          ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
1498          cSize},
1499         lhsEltType);
1500     VectorType rhsType = VectorType::get({kwSize, cSize, fSize}, rhsEltType);
1501     VectorType resType = VectorType::get({nSize, wSize, fSize}, resEltType);
1502 
1503     // Read lhs slice of size {w * strideW + kw * dilationW, c, f} @ [0, 0, 0].
1504     Value lhs = builder.create<vector::TransferReadOp>(
1505         loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
1506     // Read rhs slice of size {kw, c, f} @ [0, 0, 0].
1507     Value rhs = builder.create<vector::TransferReadOp>(
1508         loc, rhsType, rhsShaped, ValueRange{zero, zero, zero});
1509     // Read res slice of size {n, w, f} @ [0, 0, 0].
1510     Value res = builder.create<vector::TransferReadOp>(
1511         loc, resType, resShaped, ValueRange{zero, zero, zero});
1512 
1513     //===------------------------------------------------------------------===//
1514     // Begin vector-only rewrite part
1515     //===------------------------------------------------------------------===//
1516     // Unroll along kw and read slices of lhs and rhs.
1517     SmallVector<Value> lhsVals, rhsVals, resVals;
1518     for (int64_t kw = 0; kw < kwSize; ++kw) {
1519       // Extract rhs slice of size {c, f} @ [kw].
1520       rhsVals.push_back(builder.create<vector::ExtractOp>(
1521           loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
1522 
1523       for (int64_t w = 0; w < wSize; w += wSizeStep) {
1524         // Extract lhs slice of size {n, wSizeStep, c}
1525         //   @ [0, sw * w + dw * kw, 0].
1526         lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
1527             loc, lhs,
1528             /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
1529             /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
1530             /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
1531 
1532         // This does not depend on kw.
1533         if (kw == 0) {
1534           // Extract res slice: {n, wSizeStep, f} @ [0, w, 0].
1535           resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
1536               loc, res,
1537               /*offsets=*/ArrayRef<int64_t>{0, w, 0},
1538               /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, fSize},
1539               /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
1540         }
1541       }
1542     }
1543 
1544     auto linearIndex = [&](int64_t kw, int64_t w) {
1545       return kw * (wSize / wSizeStep) + w;
1546     };
1547 
1548     // Compute contraction: O{n, w, f} += I{n, sw * w + dw * kw, c} * F{c, f}
1549     for (int64_t kw = 0; kw < kwSize; ++kw) {
1550       for (int64_t w = 0; w < wSize; w += wSizeStep) {
1551         resVals[w] = conv1dSliceAsContraction(
1552             builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
1553       }
1554     }
1555 
1556     // Write back res slice: {n, wSizeStep, f} @ [0, w, 0].
1557     // This does not depend on kw.
1558     for (int64_t w = 0; w < wSize; w += wSizeStep) {
1559       res = builder.create<vector::InsertStridedSliceOp>(
1560           loc, resVals[w], res,
1561           /*offsets=*/ArrayRef<int64_t>{0, w, 0},
1562           /*strides=*/ArrayRef<int64_t>{1, 1, 1});
1563     }
1564     //===------------------------------------------------------------------===//
1565     // End vector-only rewrite part
1566     //===------------------------------------------------------------------===//
1567 
1568     // Write back res slice of size {n, w, f} @ [0, 0, 0].
1569     return builder
1570         .create<vector::TransferWriteOp>(loc, res, resShaped,
1571                                          ValueRange{zero, zero, zero})
1572         .getOperation();
1573   }
1574 
1575   // Create a contraction: lhs{n, w, c} * rhs{c, f} -> res{n, w, f}
1576   Value conv1dSliceAsContraction(OpBuilder &b, Location loc, Value lhs,
1577                                  Value rhs, Value res) {
1578     StringRef par = Par().strRef, red = Red().strRef;
1579     AffineExpr n, w, f, c;
1580     bindDims(ctx, n, w, f, c);
1581     return builder.create<vector::ContractionOp>(
1582         loc, lhs, rhs, res,
1583         /*indexingMaps=*/MapList{{n, w, c}, {c, f}, {n, w, f}},
1584         /*iteratorTypes=*/ArrayRef<StringRef>{par, par, par, red});
1585   }
1586 
1587   /// Generate a vector implementation for:
1588   /// ```
1589   ///   Op def: (     n,     w,     c,    kw)
1590   ///    Iters: ({Par(), Par(), Par(), Red()})
1591   ///   Layout: {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
1592   /// ```
1593   /// kw is always unrolled.
1594   /// TODO: w (resp. kw) is unrolled when the strideW ( resp. dilationW) is > 1.
1595   FailureOr<Operation *> dilatedConv() {
1596     if (!valid)
1597       return failure();
1598 
1599     int nSize = lhsShapedType.getShape()[0];
1600     int wSize = resShapedType.getShape()[1];
1601     int cSize = lhsShapedType.getShape()[2];
1602     int kwSize = rhsShapedType.getShape()[0];
1603 
1604     vector::TransferWriteOp write;
1605     Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
1606 
1607     // w is unrolled (i.e. wSizeStep == 1) iff strideW > 1.
1608     // When strideW == 1, we can batch the contiguous loads and avoid unrolling
1609     int64_t wSizeStep = strideW == 1 ? wSize : 1;
1610 
1611     Type lhsEltType = lhsShapedType.getElementType();
1612     Type rhsEltType = rhsShapedType.getElementType();
1613     Type resEltType = resShapedType.getElementType();
1614     VectorType lhsType = VectorType::get(
1615         {nSize,
1616          // iw = ow * sw + kw *  dw - 1
1617          //   (i.e. 16 convolved with 3 (@stride 1 dilation 1) -> 14)
1618          ((wSize - 1) * strideW + 1) + ((kwSize - 1) * dilationW + 1) - 1,
1619          cSize},
1620         lhsEltType);
1621     VectorType rhsType = VectorType::get({kwSize, cSize}, rhsEltType);
1622     VectorType resType = VectorType::get({nSize, wSize, cSize}, resEltType);
1623 
1624     // Read lhs slice of size {n, w * strideW + kw * dilationW, c} @ [0, 0, 0].
1625     Value lhs = builder.create<vector::TransferReadOp>(
1626         loc, lhsType, lhsShaped, ValueRange{zero, zero, zero});
1627     // Read rhs slice of size {kw, c} @ [0, 0].
1628     Value rhs = builder.create<vector::TransferReadOp>(loc, rhsType, rhsShaped,
1629                                                        ValueRange{zero, zero});
1630     // Read res slice of size {n, w, c} @ [0, 0, 0].
1631     Value res = builder.create<vector::TransferReadOp>(
1632         loc, resType, resShaped, ValueRange{zero, zero, zero});
1633 
1634     //===------------------------------------------------------------------===//
1635     // Begin vector-only rewrite part
1636     //===------------------------------------------------------------------===//
1637     // Unroll along kw and read slices of lhs and rhs.
1638     SmallVector<Value> lhsVals, rhsVals, resVals;
1639     for (int64_t kw = 0; kw < kwSize; ++kw) {
1640       // Extract rhs slice of size {c} @ [kw].
1641       rhsVals.push_back(builder.create<vector::ExtractOp>(
1642           loc, rhs, /*offsets=*/ArrayRef<int64_t>{kw}));
1643 
1644       for (int64_t w = 0; w < wSize; w += wSizeStep) {
1645         // Extract lhs slice of size {n, wSizeStep, c}
1646         //   @ [0, sw * w + dw * kw, 0].
1647         lhsVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
1648             loc, lhs,
1649             /*offsets=*/ArrayRef<int64_t>{0, w * strideW + kw * dilationW, 0},
1650             /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
1651             /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
1652 
1653         // This does not depend on kw.
1654         if (kw == 0) {
1655           // Extract res slice: {n, wSizeStep, c} @ [0, w, 0].
1656           resVals.push_back(builder.create<vector::ExtractStridedSliceOp>(
1657               loc, res,
1658               /*offsets=*/ArrayRef<int64_t>{0, w, 0},
1659               /*sizes=*/ArrayRef<int64_t>{nSize, wSizeStep, cSize},
1660               /*strides=*/ArrayRef<int64_t>{1, 1, 1}));
1661         }
1662       }
1663     }
1664 
1665     auto linearIndex = [&](int64_t kw, int64_t w) {
1666       return kw * (wSize / wSizeStep) + w;
1667     };
1668 
1669     // Compute contraction: O{n, w, c} += I{n, sw * w + dw * kw, c} * F{c}
1670     for (int64_t kw = 0; kw < kwSize; ++kw) {
1671       for (int64_t w = 0; w < wSize; w += wSizeStep) {
1672         resVals[w] = dilatedConv1dSliceAsFma(
1673             builder, loc, lhsVals[linearIndex(kw, w)], rhsVals[kw], resVals[w]);
1674       }
1675     }
1676 
1677     // Write back res slice: {n, wSizeStep, c} @ [0, w, 0].
1678     // This does not depend on kw.
1679     for (int64_t w = 0; w < wSize; w += wSizeStep) {
1680       res = builder.create<vector::InsertStridedSliceOp>(
1681           loc, resVals[w], res,
1682           /*offsets=*/ArrayRef<int64_t>{0, w, 0},
1683           /*strides=*/ArrayRef<int64_t>{1, 1, 1});
1684     }
1685     //===------------------------------------------------------------------===//
1686     // End vector-only rewrite part
1687     //===------------------------------------------------------------------===//
1688 
1689     // Write back res slice of size {n, w, c} @ [0, 0, 0].
1690     return builder
1691         .create<vector::TransferWriteOp>(loc, res, resShaped,
1692                                          ValueRange{zero, zero, zero})
1693         .getOperation();
1694   }
1695 
1696   /// Lower lhs{n, w, c} * rhs{c} -> res{n, w, c} to fma.
1697   Value dilatedConv1dSliceAsFma(OpBuilder &b, Location loc, Value lhs,
1698                                 Value rhs, Value res) {
1699     Value bcast = builder.create<vector::BroadcastOp>(loc, res.getType(), rhs);
1700     return b.create<vector::FMAOp>(loc, lhs, bcast, res);
1701   }
1702 
1703   /// Entry point that transposes into the common form:
1704   ///   {{n, strideW * w + dilationW * kw, c}, {kw, c, f}, {n, w, f}}
1705   FailureOr<Operation *> generateConv() {
1706     AffineExpr n, w, f, kw, c;
1707     bindDims(ctx, n, w, f, kw, c);
1708     if (!iters({Par(), Par(), Par(), Red(), Red()}))
1709       return failure();
1710 
1711     // No transposition needed.
1712     if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
1713                 /*rhsIndex*/ {kw, c, f},
1714                 /*resIndex*/ {n, w, f}}))
1715       return conv();
1716     return failure();
1717   }
1718 
1719   /// Entry point that transposes into the common form:
1720   ///   {{n, strideW * w + dilationW * kw, c}, {kw, c}, {n, w, c}}
1721   FailureOr<Operation *> generateDilatedConv() {
1722     AffineExpr n, w, c, kw;
1723     bindDims(ctx, n, w, c, kw);
1724     if (!iters({Par(), Par(), Par(), Red()}))
1725       return failure();
1726 
1727     // No transposition needed.
1728     if (layout({/*lhsIndex*/ {n, strideW * w + dilationW * kw, c},
1729                 /*rhsIndex*/ {kw, c},
1730                 /*resIndex*/ {n, w, c}}))
1731       return dilatedConv();
1732     return failure();
1733   }
1734 
1735 private:
1736   bool valid;
1737   int strideW, dilationW;
1738   Value lhsShaped, rhsShaped, resShaped;
1739   ShapedType lhsShapedType, rhsShapedType, resShapedType;
1740 };
1741 } // namespace
1742 
1743 /// Helper function to vectorize a `linalgOp` with convolution semantics.
1744 // TODO: extend the generic vectorization to support windows and drop this.
1745 static FailureOr<Operation *>
1746 vectorizeConvolution(OpBuilder &b, ConvolutionOpInterface convOp) {
1747   // TODO: these are legitimately part of ConvolutionOpInterface.
1748   auto strides = convOp->getAttrOfType<DenseIntElementsAttr>("strides");
1749   auto dilations = convOp->getAttrOfType<DenseIntElementsAttr>("dilations");
1750   auto stride = strides ? *strides.getValues<uint64_t>().begin() : 1;
1751   auto dilation = dilations ? *dilations.getValues<uint64_t>().begin() : 1;
1752   LinalgOp linalgOp = cast<LinalgOp>(convOp.getOperation());
1753   Conv1DNwcGenerator e(b, linalgOp, stride, dilation);
1754   auto res = e.generateConv();
1755   if (succeeded(res))
1756     return res;
1757   return e.generateDilatedConv();
1758 }
1759 
1760 struct VectorizeConvolution
1761     : public OpInterfaceRewritePattern<ConvolutionOpInterface> {
1762   using OpInterfaceRewritePattern::OpInterfaceRewritePattern;
1763 
1764   LogicalResult matchAndRewrite(ConvolutionOpInterface convOp,
1765                                 PatternRewriter &rewriter) const override {
1766     FailureOr<Operation *> resultOrFail =
1767         vectorizeConvolution(rewriter, convOp);
1768     if (failed(resultOrFail))
1769       return failure();
1770     Operation *newOp = *resultOrFail;
1771     if (newOp->getNumResults() == 0) {
1772       rewriter.eraseOp(convOp.getOperation());
1773       return success();
1774     }
1775     assert(newOp->getNumResults() == 1 && "expected single result");
1776     rewriter.replaceOp(convOp.getOperation(), newOp->getResult(0));
1777     return success();
1778   }
1779 };
1780 
1781 void mlir::linalg::populateConvolutionVectorizationPatterns(
1782     RewritePatternSet &patterns, PatternBenefit benefit) {
1783   patterns.add<VectorizeConvolution>(patterns.getContext(), benefit);
1784 }
1785