//===- Tiling.cpp - Implementation of linalg Tiling -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Tiling pass.
//
//===----------------------------------------------------------------------===//

#include <utility>

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

#define DEBUG_TYPE "linalg-tiling"

static bool isZero(Value v) {
  if (auto cst = v.getDefiningOp<arith::ConstantIndexOp>())
    return cst.value() == 0;
  return false;
}

using LoopIndexToRangeIndexMap = DenseMap<int, int>;
// Creates one range per non-zero entry in `tileSizes`, i.e. one range for each
// loop of the LinalgOp that is tiled. The `tileSizes` argument has one entry
// per surrounding loop; by convention, a zero entry means that the
// corresponding loop is not tiled. This convention simplifies the
// implementation by avoiding affine map manipulations. The returned ranges
// correspond to the loop ranges, in the proper order, that are tiled and for
// which new loops will be created. The function also returns a map from the
// loop indices of the LinalgOp to the corresponding range indices of the
// newly created loops.
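//
// For example (illustrative), for a LinalgOp with three loops and
// `allTileSizes` = [%c10, %c0, %c25] in loop order, only loops 0 and 2 are
// tiled. The function returns the two ranges {0, size0, 10} and
// {0, size2, 25}, together with the map {0 -> 0, 2 -> 1}; loop 1 has no entry.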
static std::tuple<SmallVector<Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(RewriterBase &b, Location loc, AffineMap map,
                    ValueRange allShapeSizes, ValueRange allTileSizes) {
  assert(allTileSizes.size() == map.getNumResults());
  // Apply `map` to get shape sizes in loop order.
  auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
  SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());

  // Traverse the tile sizes, which are in loop order, and erase zeros
  // everywhere.
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  for (int idx = 0, e = tileSizes.size(), zerosCount = 0; idx < e; ++idx) {
    if (isZero(tileSizes[idx - zerosCount])) {
      shapeSizes.erase(shapeSizes.begin() + idx - zerosCount);
      tileSizes.erase(tileSizes.begin() + idx - zerosCount);
      ++zerosCount;
      continue;
    }
    loopIndexToRangeIndex[idx] = idx - zerosCount;
  }

  // Create one range per remaining (non-zero) tile size.
  SmallVector<Range, 4> res;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx)
    res.push_back(Range{b.create<arith::ConstantIndexOp>(loc, 0),
                        shapeSizes[idx], tileSizes[idx]});
  return std::make_tuple(res, loopIndexToRangeIndex);
}

// All indices returned by IndexOp should be invariant with respect to tiling.
// Therefore, if an operation is tiled, we have to transform the indices
// accordingly, i.e. offset them by the values of the corresponding induction
// variables that are captured implicitly in the body of the op.
//
// Example. `linalg.generic` before tiling:
//
// #id_2d = (i, j) -> (i, j)
// #pointwise_2d_trait = {
//   indexing_maps = [#id_2d, #id_2d],
//   iterator_types = ["parallel", "parallel"]
// }
// linalg.generic #pointwise_2d_trait %operand, %result {
//   ^bb0(%operand_in: f32, %result_in: f32):
//     %i = linalg.index 0 : index
//     %j = linalg.index 1 : index
//     <some operations that use %i, %j>
// }: memref<50x100xf32>, memref<50x100xf32>
//
// After tiling with tile sizes 10 and 25:
//
// #strided = (i, j)[s0, s1, s2] -> (i * s1 + s0 + j * s2)
//
// %c1 = arith.constant 1 : index
// %c0 = arith.constant 0 : index
// %c25 = arith.constant 25 : index
// %c10 = arith.constant 10 : index
// %operand_dim_0 = memref.dim %operand, %c0 : memref<50x100xf32>
// %operand_dim_1 = memref.dim %operand, %c1 : memref<50x100xf32>
// scf.for %k = %c0 to %operand_dim_0 step %c10 {
//   scf.for %l = %c0 to %operand_dim_1 step %c25 {
//     %4 = memref.subview %operand[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     %5 = memref.subview %result[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     linalg.generic #pointwise_2d_trait %4, %5 {
//     ^bb0(%operand_in: f32, %result_in: f32):
//       %i = linalg.index 0 : index
//       %j = linalg.index 1 : index
//       // Indices `k` and `l` are implicitly captured in the body.
//       %transformed_i = arith.addi %i, %k : index // index `i` is offset by %k
//       %transformed_j = arith.addi %j, %l : index // index `j` is offset by %l
//       // Every use of %i, %j is replaced with %transformed_i, %transformed_j.
//       <some operations that use %transformed_i, %transformed_j>
//     }: memref<?x?xf32, #strided>, memref<?x?xf32, #strided>
//   }
// }
//
// TODO: Investigate whether mixing implicit and explicit indices can lose
// information.
static void
transformIndexOps(RewriterBase &b, LinalgOp op, SmallVectorImpl<Value> &ivs,
                  const LoopIndexToRangeIndexMap &loopIndexToRangeIndex) {
  SmallVector<Value> allIvs(op.getNumLoops(), nullptr);
  for (auto &en : enumerate(allIvs)) {
    auto rangeIndex = loopIndexToRangeIndex.find(en.index());
    if (rangeIndex == loopIndexToRangeIndex.end())
      continue;
    en.value() = ivs[rangeIndex->second];
  }
  addTileLoopIvsToIndexOpResults(b, op, allIvs);
}

// Insert a tile `source` into the destination tensor `dest`. The position at
// which the tile is inserted, as well as its size, is taken from the given
// ExtractSliceOp `sliceOp`.
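//
// For example (illustrative), if `sliceOp` is
//   %tile = tensor.extract_slice %t[%i, %j][10, 25][1, 1]
// then inserting `source` into `dest` produces
//   tensor.insert_slice %source into %dest[%i, %j][10, 25][1, 1]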
static Value insertSliceIntoTensor(RewriterBase &b, Location loc,
                                   tensor::ExtractSliceOp sliceOp, Value source,
                                   Value dest) {
  return b.create<tensor::InsertSliceOp>(
      loc, sliceOp.source().getType(), source, dest, sliceOp.offsets(),
      sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
      sliceOp.static_sizes(), sliceOp.static_strides());
}

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ValueRange tileSizes,
                 const LinalgTilingOptions &options) {
  auto nLoops = op.getNumLoops();
  // More tile sizes than loops may be provided; only keep the first `nLoops`.
  tileSizes = tileSizes.take_front(nLoops);

  if (llvm::all_of(tileSizes, isZero)) {
    TiledLinalgOp tiledOp;
    tiledOp.op = cast<LinalgOp>(b.clone(*op.getOperation()));
    tiledOp.tensorResults.assign(tiledOp.op->result_begin(),
                                 tiledOp.op->result_end());
    return tiledOp;
  }

  // 1. Build the tiled loop ranges.
  auto allShapeSizes = op.createFlatListOfOperandDims(b, op.getLoc());
  AffineMap shapeSizesToLoopsMap = op.getShapesToLoopsMap();
  if (!shapeSizesToLoopsMap)
    return failure();

  SmallVector<Range, 4> loopRanges;
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
      b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);

  SmallVector<Attribute, 4> iteratorTypes;
  for (const auto &attr :
       enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
    if (loopIndexToRangeIndex.count(attr.index()))
      iteratorTypes.push_back(attr.value());
  }
  // If interchangeVector is empty, use the identity. Build the permutation map
  // otherwise.
  auto invPermutationMap =
      AffineMap::getMultiDimIdentityMap(tileSizes.size(), b.getContext());
  if (!options.interchangeVector.empty()) {
    // Based on the pruned iterations (due to zero tile size), recompute the
    // interchange vector.
    SmallVector<unsigned, 4> interchangeVector;
    interchangeVector.reserve(options.interchangeVector.size());
    for (auto pos : options.interchangeVector) {
      auto it = loopIndexToRangeIndex.find(pos);
      if (it == loopIndexToRangeIndex.end())
        continue;
      interchangeVector.push_back(it->second);
    }
    // The interchange vector is guaranteed to be a permutation, so
    // `inversePermutation` must succeed.
    invPermutationMap = inversePermutation(
        AffineMap::getPermutationMap(interchangeVector, b.getContext()));
    assert(invPermutationMap);
    SmallVector<int64_t> permutation(interchangeVector.begin(),
                                     interchangeVector.end());
    applyPermutationToVector(loopRanges, permutation);
    applyPermutationToVector(iteratorTypes, permutation);
  }

  // 2. Create the tiled loops.
  LinalgOp res = op;
  SmallVector<Value, 4> ivs, tensorResults;
  auto tiledLoopBodyBuilder =
      [&](OpBuilder &builder, Location loc, ValueRange localIvs,
          ValueRange operandValuesToUse) -> scf::ValueVector {
    ivs.assign(localIvs.begin(), localIvs.end());

    // When an `interchangeVector` is present, it has been applied to the
    // loop ranges and the iterator types. Apply its inverse to the
    // resulting loop `ivs` to match the op definition.
    SmallVector<Value, 4> interchangedIvs;
    if (!options.interchangeVector.empty())
      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
    else
      interchangedIvs.assign(ivs.begin(), ivs.end());

    // Tile the `operandValuesToUse` that either match the `op` operands
    // themselves or the tile loop arguments forwarding them.
    assert(operandValuesToUse.size() ==
               static_cast<size_t>(op.getNumInputsAndOutputs()) &&
           "expected the number of operands to match the number of inputs "
           "and outputs");
    SmallVector<Value> valuesToTile = operandValuesToUse;
    auto sizeBounds =
        applyMapToValues(b, loc, shapeSizesToLoopsMap, allShapeSizes);
    SmallVector<Value, 4> tiledOperands = makeTiledShapes(
        b, loc, op, valuesToTile, interchangedIvs, tileSizes, sizeBounds);

    // TODO: use an interface/adaptor to avoid leaking position in
    // `tiledOperands`.
    SmallVector<Type, 4> resultTensorTypes;
    for (OpOperand *opOperand : op.getOutputTensorOperands())
      resultTensorTypes.push_back(
          tiledOperands[opOperand->getOperandNumber()].getType());

    res = op.clone(b, loc, resultTensorTypes, tiledOperands);

    // Insert an insert_slice for each output tensor.
    unsigned resultIdx = 0;
    for (OpOperand *opOperand : op.getOutputTensorOperands()) {
      // TODO: use an interface/adaptor to avoid leaking position in
      // `tiledOperands`.
      Value outputTensor = tiledOperands[opOperand->getOperandNumber()];
      // TODO: Propagate RewriterBase everywhere.
      IRRewriter rewriter(b);
      if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
        tensorResults.push_back(insertSliceIntoTensor(rewriter, loc, sliceOp,
                                                      res->getResult(resultIdx),
                                                      sliceOp.source()));
      } else {
        tensorResults.push_back(res->getResult(resultIdx));
      }
      ++resultIdx;
    }
    return scf::ValueVector(tensorResults.begin(), tensorResults.end());
  };
  GenerateLoopNest<LoopTy>::doit(b, op.getLoc(), loopRanges, op, iteratorTypes,
                                 tiledLoopBodyBuilder, options.distribution,
                                 options.distributionTypes);

  // 3. Transform IndexOp results w.r.t. the tiling.
  transformIndexOps(b, res, ivs, loopIndexToRangeIndex);

  // 4. Gather the newly created loops and return them with the new op.
  SmallVector<Operation *, 8> loops;
  loops.reserve(ivs.size());
  for (auto iv : ivs) {
    if (iv.isa<BlockArgument>()) {
      loops.push_back(iv.cast<BlockArgument>().getOwner()->getParentOp());
      assert(loops.back() && "no owner found for induction variable!");
    } else {
      // TODO: Instead of pushing back a nullptr, try to recover the ops that
      // replaced the loop.
      loops.push_back(nullptr);
    }
  }

  // 5. Get the tensor results from the outermost loop if available. Otherwise
  // use the previously captured `tensorResults`.
  Operation *outermostLoop = nullptr;
  for (Operation *loop : loops)
    if ((outermostLoop = loop))
      break;

  return TiledLinalgOp{
      res, loops, outermostLoop ? outermostLoop->getResults() : tensorResults};
}

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op,
                 const LinalgTilingOptions &options) {
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);

  if (!options.tileSizeComputationFunction)
    return failure();

  // Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
  // adjusting affine maps to account for missing dimensions.
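  //
  // For example (illustrative), for an op with three loops, a computed tile
  // size vector of [32] is padded to [32, 0, 0], so only the outermost loop
  // is tiled.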
  auto nLoops = op.getNumLoops();
  SmallVector<Value, 4> tileSizeVector =
      options.tileSizeComputationFunction(b, op);
  if (tileSizeVector.size() < nLoops) {
    auto zero = b.create<arith::ConstantIndexOp>(op.getLoc(), 0);
    tileSizeVector.append(nLoops - tileSizeVector.size(), zero);
  }

  return tileLinalgOpImpl<LoopTy>(b, op, tileSizeVector, options);
}

FailureOr<TiledLinalgOp>
mlir::linalg::tileLinalgOp(RewriterBase &b, LinalgOp op,
                           const LinalgTilingOptions &options) {
  switch (options.loopType) {
  case LinalgTilingLoopType::Loops:
    return tileLinalgOpImpl<scf::ForOp>(b, op, options);
  case LinalgTilingLoopType::ParallelLoops:
    return tileLinalgOpImpl<scf::ParallelOp>(b, op, options);
  case LinalgTilingLoopType::TiledLoops:
    return tileLinalgOpImpl<linalg::TiledLoopOp>(b, op, options);
  default:;
  }
  return failure();
}
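
// Example usage (a minimal sketch; `rewriter` and `linalgOp` are assumed to
// be provided by the caller):
//   auto options = LinalgTilingOptions()
//                      .setTileSizes({32, 64})
//                      .setLoopType(LinalgTilingLoopType::Loops);
//   FailureOr<TiledLinalgOp> tiled = tileLinalgOp(rewriter, linalgOp, options);
//   if (failed(tiled))
//     return failure();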

/// Generate a loop nest around a given PadTensorOp (for tiling). `newPadOp`
/// and `loopNest` are output parameters that return the new (tiled)
/// PadTensorOp and the loop nest.
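///
/// A sketch of the generated structure (illustrative, for a 2-D pad tiled in
/// both dimensions; the cloned PadTensorOp is later swapped into the loop
/// nest by ExtractSliceOfPadTensorSwapPattern):
///   %pad = linalg.pad_tensor ...
///   scf.for %i = ... iter_args(%arg0 = %init) {
///     scf.for %j = ... iter_args(%arg1 = %arg0) {
///       %tile = tensor.extract_slice %pad[...]
///       %ins  = tensor.insert_slice %tile into %arg1[...]
///       scf.yield %ins
///     }
///   }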
static LogicalResult tilePadTensorOp(RewriterBase &builder, PadTensorOp op,
                                     PadTensorOp &newPadOp, LoopNest &loopNest,
                                     const LinalgTilingOptions &options) {
  Location loc = op.getLoc();
  OpBuilder::InsertionGuard g(builder);
  builder.setInsertionPoint(op);

  // Clone PadTensorOp so that the existing op can be replaced more easily.
  newPadOp = cast<PadTensorOp>(builder.clone(*op.getOperation()));
  // Get rank and tile sizes.
  int64_t rank = op.getResultType().getRank();
  SmallVector<Value> tileSizes =
      options.tileSizeComputationFunction(builder, op);
  assert(static_cast<int64_t>(tileSizes.size()) == rank);
  // Compute lower and upper bounds of the loop nest.
  SmallVector<Range> ranges = op.getIterationDomain(builder);
  SmallVector<Value> lbs, dims, allDims, steps;
  for (int64_t i = 0; i < rank; ++i) {
    allDims.push_back(ranges[i].size);
    if (!isZero(tileSizes[i])) {
      lbs.push_back(ranges[i].offset);
      dims.push_back(ranges[i].size);
      steps.push_back(tileSizes[i]);
    }
  }
  // Generate loop nest: One loop per dimension.
  SmallVector<Value> destOperand = op.getDestinationOperands(builder);
  loopNest = mlir::scf::buildLoopNest(
      builder, loc, lbs, /*ubs=*/dims, steps, ValueRange(destOperand),
      [&](OpBuilder &b, Location loc, ValueRange localIvs,
          ValueRange iterArgs) -> scf::ValueVector {
        // Compute offsets and sizes of ExtractSliceOp.
        SmallVector<Value> offsets =
            computeTileOffsets(b, loc, localIvs, tileSizes);
        SmallVector<Value> sizes =
            computeTileSizes(b, loc, localIvs, tileSizes, allDims);
        // Create ExtractSliceOp: Extract a tile from the PadTensorOp.
        // Note: The PadTensorOp is located outside of the loop nest. It is
        // later moved inside by ExtractSliceOfPadTensorSwapPattern.
        auto map = AffineMap::getMultiDimIdentityMap(rank, b.getContext());
        Value tiledOutput =
            makeTiledShape(b, loc, newPadOp->getResult(0), tileSizes, map,
                           offsets, allDims, sizes);
        auto sliceOp = tiledOutput.getDefiningOp<tensor::ExtractSliceOp>();
        assert(sliceOp && "expected ExtractSliceOp");
        // Insert the tile into the output tensor.
        // TODO: Propagate RewriterBase everywhere.
        IRRewriter rewriter(b);
        Value yieldValue =
            insertSliceIntoTensor(rewriter, loc, sliceOp, sliceOp, iterArgs[0]);
        return scf::ValueVector({yieldValue});
      });
  return success();
}

namespace {
struct PadTensorOpTilingPattern : public OpRewritePattern<PadTensorOp> {
  PadTensorOpTilingPattern(MLIRContext *ctx, LinalgTilingOptions opt)
      : OpRewritePattern<PadTensorOp>(ctx), options(std::move(opt)) {}

  LogicalResult matchAndRewrite(PadTensorOp op,
                                PatternRewriter &rewriter) const override {
    if (op->hasAttr(LinalgTransforms::kLinalgTransformMarker))
      return failure();
    PadTensorOp newPadOp;
    LoopNest loopNest;
    if (failed(tilePadTensorOp(rewriter, op, newPadOp, loopNest, options)))
      return failure();
    newPadOp->setAttr(LinalgTransforms::kLinalgTransformMarker,
                      rewriter.getUnitAttr());
    // Replace all uses of the original PadTensorOp.
    rewriter.replaceOp(op, loopNest.getResults()[0]);
    return success();
  }

  LinalgTilingOptions options;
};
} // namespace

namespace {
/// Helper classes for type list expansion.
template <typename... OpTypes>
class CanonicalizationPatternList;

template <>
class CanonicalizationPatternList<> {
public:
  static void insert(RewritePatternSet &patterns) {}
};

template <typename OpTy, typename... OpTypes>
class CanonicalizationPatternList<OpTy, OpTypes...> {
public:
  static void insert(RewritePatternSet &patterns) {
    OpTy::getCanonicalizationPatterns(patterns, patterns.getContext());
    CanonicalizationPatternList<OpTypes...>::insert(patterns);
  }
};
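
// For example (illustrative),
// `CanonicalizationPatternList<FillOp, MatmulOp>::insert(patterns)` expands
// recursively to `FillOp::getCanonicalizationPatterns(...)` followed by
// `MatmulOp::getCanonicalizationPatterns(...)`, with the empty specialization
// above terminating the recursion.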
} // namespace

RewritePatternSet
mlir::linalg::getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx) {
  RewritePatternSet patterns(ctx);
  populateLinalgTilingCanonicalizationPatterns(patterns);
  return patterns;
}

void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
    RewritePatternSet &patterns) {
  auto *ctx = patterns.getContext();
  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
  arith::ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);

  memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);
  memref::ViewOp::getCanonicalizationPatterns(patterns, ctx);

  scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
  scf::ParallelOp::getCanonicalizationPatterns(patterns, ctx);

  tensor::CastOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::ExtractSliceOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::InsertSliceOp::getCanonicalizationPatterns(patterns, ctx);

  InitTensorOp::getCanonicalizationPatterns(patterns, ctx);
  PadTensorOp::getCanonicalizationPatterns(patterns, ctx);
  ctx->getLoadedDialect<LinalgDialect>()->getCanonicalizationPatterns(patterns);

  CanonicalizationPatternList<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
      >::insert(patterns);
}

/// Populate the given list with patterns that apply Linalg tiling.
static void insertTilingPatterns(RewritePatternSet &patterns,
                                 const LinalgTilingOptions &options) {
  auto *ctx = patterns.getContext();
  LinalgTransformationFilter f(ArrayRef<StringAttr>{},
                               StringAttr::get(ctx, "tiled"));
  TilingPatterns<GenericOp,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                 >::insert(patterns, options, f);
  patterns.add<PadTensorOpTilingPattern>(ctx, options);
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
  MLIRContext *ctx = funcOp.getContext();
  RewritePatternSet patterns(ctx);
  patterns.add<ExtractSliceOfPadTensorSwapPattern>(ctx);
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  (void)applyPatternsAndFoldGreedily(
      funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
}

namespace {
struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
  LinalgTilingPass() = default;
  LinalgTilingPass(ArrayRef<int64_t> tileSizes, LinalgTilingLoopType loopType,
                   ArrayRef<StringRef> distributionTypes) {
    this->tileSizes = tileSizes;
    // Leave the `loopType` string option empty so that the StringSwitch in
    // runOnFunction falls back to `loopTypeEnum`.
    this->loopType = "";
    this->loopTypeEnum = loopType;
    this->distributionTypes = llvm::to_vector<2>(llvm::map_range(
        distributionTypes, [](StringRef ref) { return ref.str(); }));
  }

  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    LinalgTilingLoopType type =
        llvm::StringSwitch<LinalgTilingLoopType>(loopType)
            .Case("for", LinalgTilingLoopType::Loops)
            .Case("affine", LinalgTilingLoopType::AffineLoops)
            .Case("parallel", LinalgTilingLoopType::ParallelLoops)
            .Case("tiled_loop", LinalgTilingLoopType::TiledLoops)
            .Default(loopTypeEnum);
    auto distTypes = llvm::to_vector<2>(llvm::map_range(
        distributionTypes, [](std::string &str) { return StringRef(str); }));
    auto options = LinalgTilingOptions()
                       .setTileSizes(tileSizes)
                       .setLoopType(type)
                       .setDistributionTypes(distTypes);
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    insertTilingPatterns(patterns, options);
    scf::populateSCFForLoopCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
    (void)applyPatternsAndFoldGreedily(
        funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
    // Drop the marker.
    funcOp.walk([](LinalgOp op) {
      op->removeAttr(LinalgTransforms::kLinalgTransformMarker);
    });

    // Apply the swap pattern after generating the loop nest and running
    // canonicalizations.
    applyExtractSliceOfPadTensorSwapPattern(funcOp);
  }

  LinalgTilingLoopType loopTypeEnum;
};

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
                             linalg::LinalgTilingLoopType loopType,
                             ArrayRef<StringRef> distributionTypes) {
  return std::make_unique<LinalgTilingPass>(tileSizes, loopType,
                                            distributionTypes);
}
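
// Invocation sketch from the command line (assuming the pass is registered as
// `linalg-tile` with the options declared in Passes.td):
//   mlir-opt --linalg-tile="tile-sizes=2,4 loop-type=parallel" input.mlir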