//===- Tiling.cpp - Implementation of linalg Tiling -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Tiling pass.
//
//===----------------------------------------------------------------------===//

#include <utility>

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

#define DEBUG_TYPE "linalg-tiling"

static bool isZero(Value v) {
  if (auto cst = v.getDefiningOp<arith::ConstantIndexOp>())
    return cst.value() == 0;
  return false;
}

using LoopIndexToRangeIndexMap = DenseMap<int, int>;

// Creates one Range per non-zero entry in `tileSizes`, i.e. one for each loop
// of the LinalgOp that is tiled. The `tileSizes` argument has one entry per
// surrounding loop; by convention, a zero entry means that the corresponding
// loop is not tiled. This convention simplifies implementations by avoiding
// affine map manipulations.
// The returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created. The function also returns
// a map from loop indices of the LinalgOp to the corresponding range indices
// of the newly created loops.
static std::tuple<SmallVector<Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(RewriterBase &b, Location loc, AffineMap map,
                    ValueRange allShapeSizes, ValueRange allTileSizes) {
  assert(allTileSizes.size() == map.getNumResults());
  // Apply `map` to get shape sizes in loop order.
  auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
  SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());

  // Traverse the tile sizes, which are in loop order, and erase zeros
  // everywhere.
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  for (int idx = 0, e = tileSizes.size(), zerosCount = 0; idx < e; ++idx) {
    if (isZero(tileSizes[idx - zerosCount])) {
      shapeSizes.erase(shapeSizes.begin() + idx - zerosCount);
      tileSizes.erase(tileSizes.begin() + idx - zerosCount);
      ++zerosCount;
      continue;
    }
    loopIndexToRangeIndex[idx] = idx - zerosCount;
  }

  // Create a new range with the applied tile sizes.
  SmallVector<Range, 4> res;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx)
    res.push_back(Range{b.create<arith::ConstantIndexOp>(loc, 0),
                        shapeSizes[idx], tileSizes[idx]});
  return std::make_tuple(res, loopIndexToRangeIndex);
}
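
// Example (an illustrative sketch, not from a specific test): with loops
// (i, j, k) and `allTileSizes` = [%c10, %c0, %c25], the entry for `j` is
// erased, two ranges {0, %size_i, %c10} and {0, %size_k, %c25} are returned,
// and the map is {0 -> 0, 2 -> 1}, i.e. loop 2 of the op becomes new loop 1.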

// All indices returned by IndexOp should be invariant with respect to tiling.
// Therefore, if an operation is tiled, we have to transform the indices
// accordingly, i.e. offset them by the values of the corresponding induction
// variables that are captured implicitly in the body of the op.
//
// Example. `linalg.generic` before tiling:
//
// #id_2d = (i, j) -> (i, j)
// #pointwise_2d_trait = {
//   indexing_maps = [#id_2d, #id_2d],
//   iterator_types = ["parallel", "parallel"]
// }
// linalg.generic #pointwise_2d_trait %operand, %result {
//   ^bb0(%operand_in: f32, %result_in: f32):
//     %i = linalg.index 0 : index
//     %j = linalg.index 1 : index
//     <some operations that use %i, %j>
// }: memref<50x100xf32>, memref<50x100xf32>
//
// After the tiling pass with tile sizes 10 and 25:
//
// #strided = (i, j)[s0, s1, s2] -> (i * s1 + s0 + j * s2)
//
// %c1 = arith.constant 1 : index
// %c0 = arith.constant 0 : index
// %c25 = arith.constant 25 : index
// %c10 = arith.constant 10 : index
// operand_dim_0 = dim %operand, 0 : memref<50x100xf32>
// operand_dim_1 = dim %operand, 1 : memref<50x100xf32>
// scf.for %k = %c0 to operand_dim_0 step %c10 {
//   scf.for %l = %c0 to operand_dim_1 step %c25 {
//     %4 = memref.subview %operand[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     %5 = memref.subview %result[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     linalg.generic #pointwise_2d_trait %4, %5 {
//     ^bb0(%operand_in: f32, %result_in: f32):
//       %i = linalg.index 0 : index
//       %j = linalg.index 1 : index
//       // Indices `k` and `l` are implicitly captured in the body.
//       %transformed_i = arith.addi %i, %k : index // index `i` is offset by %k
//       %transformed_j = arith.addi %j, %l : index // index `j` is offset by %l
//       // Every use of %i, %j is replaced with %transformed_i, %transformed_j.
//       <some operations that use %transformed_i, %transformed_j>
//     }: memref<?x?xf32, #strided>, memref<?x?xf32, #strided>
//   }
// }
//
// TODO: Investigate whether mixing implicit and explicit indices
// can lead to losing information.
static void
transformIndexOps(RewriterBase &b, LinalgOp op, SmallVectorImpl<Value> &ivs,
                  const LoopIndexToRangeIndexMap &loopIndexToRangeIndex) {
  SmallVector<Value> allIvs(op.getNumLoops(), nullptr);
  for (auto &en : enumerate(allIvs)) {
    auto rangeIndex = loopIndexToRangeIndex.find(en.index());
    if (rangeIndex == loopIndexToRangeIndex.end())
      continue;
    en.value() = ivs[rangeIndex->second];
  }
  addTileLoopIvsToIndexOpResults(b, op, allIvs);
}
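
// Continuing the example above (a sketch): with `loopIndexToRangeIndex` =
// {0 -> 0, 2 -> 1} and `ivs` = [%k, %l], `allIvs` becomes [%k, nullptr, %l],
// so results of `linalg.index 0` are offset by %k and results of
// `linalg.index 2` by %l, while loop 1 (untiled) is left untouched.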

// Insert a tile `source` into the destination tensor `dest`. The position at
// which the tile is inserted (as well as the size of the tile) is taken from a
// given ExtractSliceOp `sliceOp`.
static Value insertSliceIntoTensor(RewriterBase &b, Location loc,
                                   tensor::ExtractSliceOp sliceOp, Value source,
                                   Value dest) {
  return b.create<tensor::InsertSliceOp>(
      loc, sliceOp.source().getType(), source, dest, sliceOp.offsets(),
      sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
      sliceOp.static_sizes(), sliceOp.static_strides());
}
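
// In IR terms (an illustrative sketch): given
//   %tile = tensor.extract_slice %t[%i, %j][%szi, %szj][1, 1]
// this creates the mirror-image insertion
//   %r = tensor.insert_slice %source into %dest[%i, %j][%szi, %szj][1, 1]
// reusing the same (static and dynamic) offsets, sizes and strides.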

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ValueRange tileSizes,
                 const LinalgTilingOptions &options) {
  auto nLoops = op.getNumLoops();
  // The initial tile size list may be too long; only take the first nLoops
  // entries.
  tileSizes = tileSizes.take_front(nLoops);

  if (llvm::all_of(tileSizes, isZero)) {
    TiledLinalgOp tiledOp;
    tiledOp.op = cast<LinalgOp>(b.clone(*op.getOperation()));
    tiledOp.tensorResults.assign(tiledOp.op->result_begin(),
                                 tiledOp.op->result_end());
    return tiledOp;
  }

  // 1. Build the tiled loop ranges.
  auto allShapeSizes = op.createFlatListOfOperandDims(b, op.getLoc());
  AffineMap shapeSizesToLoopsMap = op.getShapesToLoopsMap();
  if (!shapeSizesToLoopsMap)
    return failure();

  SmallVector<Range, 4> loopRanges;
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
      b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);

  SmallVector<Attribute, 4> iteratorTypes;
  for (const auto &attr :
       enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
    if (loopIndexToRangeIndex.count(attr.index()))
      iteratorTypes.push_back(attr.value());
  }
  // If interchangeVector is empty, use the identity map. Otherwise, build the
  // permutation map.
  auto invPermutationMap =
      AffineMap::getMultiDimIdentityMap(tileSizes.size(), b.getContext());
  if (!options.interchangeVector.empty()) {
    // Based on the pruned iterations (due to zero tile size), recompute the
    // interchange vector.
    SmallVector<unsigned, 4> interchangeVector;
    interchangeVector.reserve(options.interchangeVector.size());
    for (auto pos : options.interchangeVector) {
      auto it = loopIndexToRangeIndex.find(pos);
      if (it == loopIndexToRangeIndex.end())
        continue;
      interchangeVector.push_back(it->second);
    }
    // The interchange vector is guaranteed to be a permutation, so
    // `inversePermutation` must succeed.
    invPermutationMap = inversePermutation(
        AffineMap::getPermutationMap(interchangeVector, b.getContext()));
    assert(invPermutationMap);
    SmallVector<int64_t> permutation(interchangeVector.begin(),
                                     interchangeVector.end());
    applyPermutationToVector(loopRanges, permutation);
    applyPermutationToVector(iteratorTypes, permutation);
  }
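
  // For example (a hypothetical case): with `interchangeVector` = [1, 0] and
  // both loops tiled, `loopRanges` and `iteratorTypes` are swapped here, and
  // `invPermutationMap` = (d0, d1) -> (d1, d0) later restores the resulting
  // loop ivs to the op's original loop order.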

  // 2. Create the tiled loops.
  LinalgOp res = op;
  SmallVector<Value, 4> ivs, tensorResults;
  auto tiledLoopBodyBuilder =
      [&](OpBuilder &builder, Location loc, ValueRange localIvs,
          ValueRange operandValuesToUse) -> scf::ValueVector {
    ivs.assign(localIvs.begin(), localIvs.end());

    // When an `interchangeVector` is present, it has been applied to the
    // loop ranges and the iterator types. Apply its inverse to the
    // resulting loop `ivs` to match the op definition.
    SmallVector<Value, 4> interchangedIvs;
    if (!options.interchangeVector.empty())
      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
    else
      interchangedIvs.assign(ivs.begin(), ivs.end());

    // Tile the `operandValuesToUse` that either match the `op` operands
    // themselves or the tile loop arguments forwarding them.
    assert(operandValuesToUse.size() ==
               static_cast<size_t>(op.getNumInputsAndOutputs()) &&
           "expected the number of operands to match the number of inputs "
           "and outputs");
    SmallVector<Value> valuesToTile = operandValuesToUse;
    auto sizeBounds =
        applyMapToValues(b, loc, shapeSizesToLoopsMap, allShapeSizes);
    SmallVector<Value, 4> tiledOperands = makeTiledShapes(
        b, loc, op, valuesToTile, interchangedIvs, tileSizes, sizeBounds);

    // TODO: use an interface/adaptor to avoid leaking position in
    // `tiledOperands`.
    SmallVector<Type, 4> resultTensorTypes;
    for (OpOperand *opOperand : op.getOutputTensorOperands())
      resultTensorTypes.push_back(
          tiledOperands[opOperand->getOperandNumber()].getType());

    res = op.clone(b, loc, resultTensorTypes, tiledOperands);

    // Insert an insert_slice for each output tensor.
    unsigned resultIdx = 0;
    for (OpOperand *opOperand : op.getOutputTensorOperands()) {
      // TODO: use an interface/adaptor to avoid leaking position in
      // `tiledOperands`.
      Value outputTensor = tiledOperands[opOperand->getOperandNumber()];
      // TODO: Propagate RewriterBase everywhere.
      IRRewriter rewriter(b);
      if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
        tensorResults.push_back(insertSliceIntoTensor(rewriter, loc, sliceOp,
                                                      res->getResult(resultIdx),
                                                      sliceOp.source()));
      } else {
        tensorResults.push_back(res->getResult(resultIdx));
      }
      ++resultIdx;
    }
    return scf::ValueVector(tensorResults.begin(), tensorResults.end());
  };
  GenerateLoopNest<LoopTy>::doit(b, op.getLoc(), loopRanges, op, iteratorTypes,
                                 tiledLoopBodyBuilder, options.distribution,
                                 options.distributionTypes);

  // 3. Transform IndexOp results w.r.t. the tiling.
  transformIndexOps(b, res, ivs, loopIndexToRangeIndex);

  // 4. Gather the newly created loops and return them with the new op.
  SmallVector<Operation *, 8> loops;
  loops.reserve(ivs.size());
  for (auto iv : ivs) {
    if (iv.isa<BlockArgument>()) {
      loops.push_back(iv.cast<BlockArgument>().getOwner()->getParentOp());
      assert(loops.back() && "no owner found for induction variable!");
    } else {
      // TODO: Instead of pushing a nullptr placeholder, try to recover the
      // ops that were materialized in place of a loop.
      loops.push_back(nullptr);
    }
  }

  // 5. Get the tensor results from the outermost loop if available. Otherwise
  // use the previously captured `tensorResults`.
  Operation *outermostLoop = nullptr;
  for (Operation *loop : loops)
    if ((outermostLoop = loop))
      break;

  return TiledLinalgOp{
      res, loops, outermostLoop ? outermostLoop->getResults() : tensorResults};
}

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op,
                 const LinalgTilingOptions &options) {
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);

  if (!options.tileSizeComputationFunction)
    return failure();

  // Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
  // adjusting affine maps to account for missing dimensions.
  auto nLoops = op.getNumLoops();
  SmallVector<Value, 4> tileSizeVector =
      options.tileSizeComputationFunction(b, op);
  if (tileSizeVector.size() < nLoops) {
    auto zero = b.create<arith::ConstantIndexOp>(op.getLoc(), 0);
    tileSizeVector.append(nLoops - tileSizeVector.size(), zero);
  }

  return tileLinalgOpImpl<LoopTy>(b, op, tileSizeVector, options);
}
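
// For example (a sketch): if the computation function returns [%c8] for an op
// with three loops, the vector is padded to [%c8, %c0, %c0], so only the
// outermost loop is tiled (by 8) and the two inner loops are left intact.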

FailureOr<TiledLinalgOp>
mlir::linalg::tileLinalgOp(RewriterBase &b, LinalgOp op,
                           const LinalgTilingOptions &options) {
  switch (options.loopType) {
  case LinalgTilingLoopType::Loops:
    return tileLinalgOpImpl<scf::ForOp>(b, op, options);
  case LinalgTilingLoopType::ParallelLoops:
    return tileLinalgOpImpl<scf::ParallelOp>(b, op, options);
  case LinalgTilingLoopType::TiledLoops:
    return tileLinalgOpImpl<linalg::TiledLoopOp>(b, op, options);
  default:;
  }
  return failure();
}
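
// Example use from a rewrite pattern (a minimal sketch; `rewriter` and
// `linalgOp` are assumed to be in scope):
//
//   auto tilingOptions = LinalgTilingOptions()
//                            .setTileSizes({16, 32})
//                            .setLoopType(LinalgTilingLoopType::Loops);
//   FailureOr<TiledLinalgOp> tiled =
//       tileLinalgOp(rewriter, linalgOp, tilingOptions);
//   if (failed(tiled))
//     return failure();
//   // `tiled->op` is the tiled LinalgOp, `tiled->loops` the generated loops.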

/// Generate a loop nest around a given tensor::PadOp (for tiling). `newPadOp`
/// and `loopNest` are output parameters that return the new (tiled)
/// tensor::PadOp and the loop nest.
static LogicalResult tilePadOp(RewriterBase &builder, tensor::PadOp op,
                               tensor::PadOp &newPadOp, LoopNest &loopNest,
                               const LinalgTilingOptions &options) {
  Location loc = op.getLoc();
  OpBuilder::InsertionGuard g(builder);
  builder.setInsertionPoint(op);

  // Clone tensor::PadOp so that the existing op can be replaced more easily.
  newPadOp = cast<tensor::PadOp>(builder.clone(*op.getOperation()));
  // Get rank and tile sizes.
  int64_t rank = op.getResultType().getRank();
  SmallVector<Value> tileSizes =
      options.tileSizeComputationFunction(builder, op);
  // Normalize untiled padding dimensions to 0.
  Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
  tileSizes.append(rank - tileSizes.size(), zero);
  // Compute lower and upper bounds of the loop nest.
  TilingInterface tilingInterface =
      dyn_cast<TilingInterface>(op.getOperation());
  SmallVector<Range> ranges = tilingInterface.getIterationDomain(builder);
  SmallVector<Value> lbs, dims, allDims, steps;
  for (int64_t i = 0; i < rank; ++i) {
    allDims.push_back(ranges[i].size);
    if (!isZero(tileSizes[i])) {
      lbs.push_back(ranges[i].offset);
      dims.push_back(ranges[i].size);
      steps.push_back(tileSizes[i]);
    }
  }
  // Generate loop nest: One loop per dimension.
  SmallVector<Value> destOperand =
      tilingInterface.getDestinationOperands(builder);
  loopNest = mlir::scf::buildLoopNest(
      builder, loc, lbs, /*ubs=*/dims, steps, ValueRange(destOperand),
      [&](OpBuilder &b, Location loc, ValueRange localIvs,
          ValueRange iterArgs) -> scf::ValueVector {
        // Compute offsets and sizes of ExtractSliceOp.
        SmallVector<Value> offsets =
            computeTileOffsets(b, loc, localIvs, tileSizes);
        SmallVector<Value> sizes =
            computeTileSizes(b, loc, localIvs, tileSizes, allDims);
        // Create ExtractSliceOp: Extract a tile from the tensor::PadOp.
        // Note: The tensor::PadOp is located outside of the loop nest. It is
        // later moved inside by ExtractSliceOfPadTensorSwapPattern.
        auto map = AffineMap::getMultiDimIdentityMap(rank, b.getContext());
        Value tiledOutput =
            makeTiledShape(b, loc, newPadOp->getResult(0), tileSizes, map,
                           offsets, allDims, sizes);
        auto sliceOp = tiledOutput.getDefiningOp<tensor::ExtractSliceOp>();
        assert(sliceOp && "expected ExtractSliceOp");
        // Insert the tile into the output tensor.
        // TODO: Propagate RewriterBase everywhere.
        IRRewriter rewriter(b);
        Value yieldValue =
            insertSliceIntoTensor(rewriter, loc, sliceOp, sliceOp, iterArgs[0]);
        return scf::ValueVector({yieldValue});
      });
  return success();
}
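
// The generated IR looks roughly as follows (an illustrative sketch for a
// 1-d pad tiled by %c8; all names are made up):
//
//   %padded = tensor.pad %in ... : tensor<?xf32> to tensor<?xf32>
//   %r = scf.for %iv = %lb to %ub step %c8 iter_args(%arg = %init) {
//     %tile = tensor.extract_slice %padded[%iv][%sz][1]
//     %ins = tensor.insert_slice %tile into %arg[%iv][%sz][1]
//     scf.yield %ins
//   }
//
// ExtractSliceOfPadTensorSwapPattern later moves the pad inside the loop so
// that only the accessed tile is padded.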

namespace {
struct PadOpTilingPattern : public OpRewritePattern<tensor::PadOp> {
  PadOpTilingPattern(MLIRContext *ctx, LinalgTilingOptions opt)
      : OpRewritePattern<tensor::PadOp>(ctx), options(std::move(opt)) {}

  LogicalResult matchAndRewrite(tensor::PadOp op,
                                PatternRewriter &rewriter) const override {
    if (op->hasAttr(LinalgTransforms::kLinalgTransformMarker))
      return failure();
    tensor::PadOp newPadOp;
    LoopNest loopNest;
    if (failed(tilePadOp(rewriter, op, newPadOp, loopNest, options)))
      return failure();
    newPadOp->setAttr(LinalgTransforms::kLinalgTransformMarker,
                      rewriter.getUnitAttr());
    // Replace all uses of the original tensor::PadOp.
    rewriter.replaceOp(op, loopNest.getResults()[0]);
    return success();
  }

  LinalgTilingOptions options;
};
} // namespace

namespace {
/// Helper classes for type list expansion.
template <typename... OpTypes>
class CanonicalizationPatternList;

template <>
class CanonicalizationPatternList<> {
public:
  static void insert(RewritePatternSet &patterns) {}
};

template <typename OpTy, typename... OpTypes>
class CanonicalizationPatternList<OpTy, OpTypes...> {
public:
  static void insert(RewritePatternSet &patterns) {
    OpTy::getCanonicalizationPatterns(patterns, patterns.getContext());
    CanonicalizationPatternList<OpTypes...>::insert(patterns);
  }
};
} // namespace

RewritePatternSet
mlir::linalg::getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx) {
  RewritePatternSet patterns(ctx);
  populateLinalgTilingCanonicalizationPatterns(patterns);
  return patterns;
}

void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
    RewritePatternSet &patterns) {
  auto *ctx = patterns.getContext();
  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
  arith::ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);

  memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);
  memref::ViewOp::getCanonicalizationPatterns(patterns, ctx);

  scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
  scf::ParallelOp::getCanonicalizationPatterns(patterns, ctx);

  tensor::CastOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::ExtractSliceOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::InsertSliceOp::getCanonicalizationPatterns(patterns, ctx);

  InitTensorOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::PadOp::getCanonicalizationPatterns(patterns, ctx);
  ctx->getLoadedDialect<LinalgDialect>()->getCanonicalizationPatterns(patterns);

  CanonicalizationPatternList<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
      >::insert(patterns);
}

/// Populate the given list with patterns that apply Linalg tiling.
static void insertTilingPatterns(RewritePatternSet &patterns,
                                 const LinalgTilingOptions &options) {
  auto *ctx = patterns.getContext();
  LinalgTransformationFilter f(ArrayRef<StringAttr>{},
                               StringAttr::get(ctx, "tiled"));
  TilingPatterns<GenericOp,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                 >::insert(patterns, options, f);
  patterns.add<PadOpTilingPattern>(ctx, options);
}

void mlir::linalg::populatePadTensorTilingPatterns(
    RewritePatternSet &patterns, const LinalgTilingOptions &options) {
  auto *ctx = patterns.getContext();
  patterns.add<PadOpTilingPattern>(ctx, options);
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
  MLIRContext *ctx = funcOp.getContext();
  RewritePatternSet patterns(ctx);
  patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  (void)applyPatternsAndFoldGreedily(
      funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
}

namespace {
struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
  LinalgTilingPass() = default;
  LinalgTilingPass(ArrayRef<int64_t> tileSizes, LinalgTilingLoopType loopType,
                   ArrayRef<StringRef> distributionTypes) {
    this->tileSizes = tileSizes;
    this->loopType = "";
    this->loopTypeEnum = loopType;
    this->distributionTypes = llvm::to_vector<2>(llvm::map_range(
        distributionTypes, [](StringRef ref) { return ref.str(); }));
  }

  void runOnOperation() override {
    FuncOp funcOp = getOperation();
    LinalgTilingLoopType type =
        llvm::StringSwitch<LinalgTilingLoopType>(loopType)
            .Case("for", LinalgTilingLoopType::Loops)
            .Case("affine", LinalgTilingLoopType::AffineLoops)
            .Case("parallel", LinalgTilingLoopType::ParallelLoops)
            .Case("tiled_loop", LinalgTilingLoopType::TiledLoops)
            .Default(loopTypeEnum);
    auto distTypes = llvm::to_vector<2>(llvm::map_range(
        distributionTypes, [](std::string &str) { return StringRef(str); }));
    auto options = LinalgTilingOptions()
                       .setTileSizes(tileSizes)
                       .setLoopType(type)
                       .setDistributionTypes(distTypes);
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    insertTilingPatterns(patterns, options);
    scf::populateSCFForLoopCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
    (void)applyPatternsAndFoldGreedily(
        funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
    // Drop the marker.
    funcOp.walk([](LinalgOp op) {
      op->removeAttr(LinalgTransforms::kLinalgTransformMarker);
    });

    // Apply swap pattern after generating loop nest and running
    // canonicalizations.
    applyExtractSliceOfPadTensorSwapPattern(funcOp);
  }

  LinalgTilingLoopType loopTypeEnum;
};

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
                             linalg::LinalgTilingLoopType loopType,
                             ArrayRef<StringRef> distributionTypes) {
  return std::make_unique<LinalgTilingPass>(tileSizes, loopType,
                                            distributionTypes);
}
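
// From the command line, this pass is typically exercised as (an assumed
// invocation, matching the "linalg-tile" registration in Passes.td):
//
//   mlir-opt %s -linalg-tile="tile-sizes=2,4 loop-type=parallel"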