//===- Tiling.cpp - Implementation of linalg Tiling -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Tiling pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/STLExtras.h"
#include "mlir/Transforms/FoldUtils.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::loop;

using folded_affine_min = folded::ValueBuilder<AffineMinOp>;

#define DEBUG_TYPE "linalg-tiling"

static bool isZero(Value v) {
  return isa_and_nonnull<ConstantIndexOp>(v.getDefiningOp()) &&
         cast<ConstantIndexOp>(v.getDefiningOp()).getValue() == 0;
}

using LoopIndexToRangeIndexMap = DenseMap<int, int>;

// Creates a number of ranges equal to the number of non-zero entries in
// `tileSizes`, one for each loop of the LinalgOp that is tiled. The
// `tileSizes` argument has one entry per surrounding loop; an entry of zero
// encodes, by convention, that the corresponding loop is not tiled. This
// convention simplifies implementations by avoiding affine map manipulations.
// The returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created. The function also returns
// a map from the loop indices of the LinalgOp to the range indices of the
// newly created loops.
static std::tuple<SmallVector<SubViewOp::Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
                    ArrayRef<Value> allViewSizes, ArrayRef<Value> allTileSizes,
                    OperationFolder *folder) {
  assert(allTileSizes.size() == map.getNumResults());
  // Apply `map` to get view sizes in loop order.
  auto viewSizes = applyMapToValues(b, loc, map, allViewSizes, folder);
  SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());

  // Traverse the tile sizes, which are in loop order, and erase zeros
  // everywhere.
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  for (int idx = 0, e = tileSizes.size(), zerosCount = 0; idx < e; ++idx) {
    if (isZero(tileSizes[idx - zerosCount])) {
      viewSizes.erase(viewSizes.begin() + idx - zerosCount);
      tileSizes.erase(tileSizes.begin() + idx - zerosCount);
      ++zerosCount;
      continue;
    }
    loopIndexToRangeIndex[idx] = idx - zerosCount;
  }
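  // For example (hypothetical sizes, not taken from any caller): with
  // `tileSizes` = [10, 0, 25], the zero entry for loop 1 is erased above,
  // leaving sizes [10, 25] in loop order and the mapping {0 -> 0, 2 -> 1}.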
  // Create a new range for each of the remaining (non-zero) tile sizes.
  SmallVector<SubViewOp::Range, 4> res;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx) {
    res.push_back(SubViewOp::Range{folded_std_constant_index(folder, 0),
                                   viewSizes[idx], tileSizes[idx]});
  }
  return std::make_tuple(res, loopIndexToRangeIndex);
}

namespace {

// Helper visitor to determine whether an AffineExpr is tiled.
// This is achieved by traversing every AffineDimExpr with position `pos` and
// checking whether the corresponding `tileSizes[pos]` is non-zero.
// This also enforces that only positive coefficients occur in multiplications.
//
// Example:
//   `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
  TileCheck(ArrayRef<Value> tileSizes) : isTiled(false), tileSizes(tileSizes) {}

  void visitDimExpr(AffineDimExpr expr) {
    isTiled |= !isZero(tileSizes[expr.getPosition()]);
  }
  void visitAffineBinaryOpExpr(AffineBinaryOpExpr expr) {
    visit(expr.getLHS());
    visit(expr.getRHS());
    if (expr.getKind() == mlir::AffineExprKind::Mul)
      assert(expr.getRHS().cast<AffineConstantExpr>().getValue() > 0 &&
             "nonpositive multiplying coefficient");
  }
  bool isTiled;
  ArrayRef<Value> tileSizes;
};

} // namespace
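// An additional TileCheck illustration (hypothetical map and sizes): for the
// access expression `d0 * 4 + d2` with tile sizes [0, 0, 8], the visitor
// reports the expression as tiled because `d2` has a non-zero tile size; the
// constant multiplier 4 merely has to be positive to pass the assertion.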
// IndexedGenericOp explicitly uses induction variables in the loop body. The
// values of the indices that are used in the loop body for any given access of
// input/output memref before the `subview` op was applied should be invariant
// with respect to tiling.
//
// Therefore, if the operation is tiled, we have to transform the indices
// accordingly, i.e. offset them by the values of the corresponding induction
// variables that are captured implicitly in the body of the op.
//
// Example. `linalg.indexed_generic` before tiling:
//
// #id_2d = (i, j) -> (i, j)
// #pointwise_2d_trait = {
//   indexing_maps = [#id_2d, #id_2d],
//   iterator_types = ["parallel", "parallel"],
//   n_views = [1, 1]
// }
// linalg.indexed_generic #pointwise_2d_trait %operand, %result {
//   ^bb0(%i: index, %j: index, %operand_in: f32, %result_in: f32):
//     <some operations that use %i, %j>
// }: memref<50x100xf32>, memref<50x100xf32>
//
// After the tiling pass with tile sizes 10 and 25:
//
// #strided = (i, j)[s0, s1, s2] -> (i * s1 + s0 + j * s2)
//
// %c1 = constant 1 : index
// %c0 = constant 0 : index
// %c25 = constant 25 : index
// %c10 = constant 10 : index
// %operand_dim_0 = dim %operand, 0 : memref<50x100xf32>
// %operand_dim_1 = dim %operand, 1 : memref<50x100xf32>
// loop.for %k = %c0 to %operand_dim_0 step %c10 {
//   loop.for %l = %c0 to %operand_dim_1 step %c25 {
//     %4 = std.subview %operand[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     %5 = std.subview %result[%k, %l][%c10, %c25][%c1, %c1]
//       : memref<50x100xf32> to memref<?x?xf32, #strided>
//     linalg.indexed_generic #pointwise_2d_trait %4, %5 {
//     ^bb0(%i: index, %j: index, %operand_in: f32, %result_in: f32):
//       // Indices `k` and `l` are implicitly captured in the body.
//       %transformed_i = addi %i, %k : index // index `i` is offset by %k
//       %transformed_j = addi %j, %l : index // index `j` is offset by %l
//       // Every use of %i, %j is replaced with %transformed_i,
//       // %transformed_j.
//       <some operations that use %transformed_i, %transformed_j>
//     }: memref<?x?xf32, #strided>, memref<?x?xf32, #strided>
//   }
// }
//
// TODO(pifon, ntv): Investigate whether mixing implicit and explicit indices
// can lose information.
static void transformIndexedGenericOpIndices(
    OpBuilder &b, LinalgOp op, ArrayRef<ValueHandle *> pivs,
    const LoopIndexToRangeIndexMap &loopIndexToRangeIndex) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
  auto indexedGenericOp = dyn_cast<IndexedGenericOp>(op.getOperation());
  if (!indexedGenericOp)
    return;

  // `linalg.indexed_generic` comes in two flavors. One has a region with a
  // single block that defines the loop body. The other has a `fun` attribute
  // that refers to an existing function symbol; in that case a call to `fun`
  // is inserted in the loop body.
  //
  // TODO(pifon): Add support for `linalg.indexed_generic` with the `fun`
  // attribute.
  auto &region = indexedGenericOp.region();
  if (region.empty()) {
    indexedGenericOp.emitOpError("expected a region");
    return;
  }
  auto &block = region.getBlocks().front();

  OpBuilder::InsertionGuard g(b);
  b.setInsertionPointToStart(&block);
  for (unsigned i = 0; i < indexedGenericOp.getNumLoops(); ++i) {
    auto rangeIndex = loopIndexToRangeIndex.find(i);
    if (rangeIndex == loopIndexToRangeIndex.end())
      continue;
    Value oldIndex = block.getArgument(i);
    // Offset the index argument `i` by the value of the corresponding
    // induction variable and replace all uses of the previous value.
    Value newIndex = b.create<AddIOp>(indexedGenericOp.getLoc(), oldIndex,
                                      pivs[rangeIndex->second]->getValue());
    // Iterate with an early-increment range: `use.set` moves the use onto
    // `newIndex`'s use list, which would otherwise invalidate the iterator.
    for (auto &use : llvm::make_early_inc_range(oldIndex.getUses())) {
      if (use.getOwner() == newIndex.getDefiningOp())
        continue;
      use.set(newIndex);
    }
  }
}

static bool isTiled(AffineExpr expr, ArrayRef<Value> tileSizes) {
  if (!expr)
    return false;
  TileCheck t(tileSizes);
  t.visit(expr);
  return t.isTiled;
}

// Checks whether `map` varies with respect to at least one non-zero tile size.
static bool isTiled(AffineMap map, ArrayRef<Value> tileSizes) {
  if (!map)
    return false;
  for (unsigned r = 0; r < map.getNumResults(); ++r)
    if (isTiled(map.getResult(r), tileSizes))
      return true;
  return false;
}

static SmallVector<Value, 4>
makeTiledViews(OpBuilder &b, Location loc, LinalgOp linalgOp,
               ArrayRef<Value> ivs, ArrayRef<Value> tileSizes,
               ArrayRef<Value> viewSizes, OperationFolder *folder) {
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(ivs.size() == static_cast<size_t>(llvm::count_if(
                           tileSizes, [](Value v) { return !isZero(v); })) &&
         "expected as many ivs as non-zero sizes");

  using namespace edsc::op;

  // Construct (potentially temporary) mins and maxes on which to apply maps
  // that define tile subviews.
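  // As a sketch (hypothetical values): with tile sizes [10, 0] and ivs = [%k],
  // the loop below produces lbs = [%k, %c0] and subViewSizes =
  // [10, viewSizes[1]], i.e. untiled dimensions keep their full extent.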
  SmallVector<Value, 8> lbs, subViewSizes;
  for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
    bool isTiled = !isZero(tileSizes[idx]);
    lbs.push_back(isTiled ? ivs[idxIvs++]
                          : (Value)folded_std_constant_index(folder, 0));
    subViewSizes.push_back(isTiled ? tileSizes[idx] : viewSizes[idx]);
  }

  auto *op = linalgOp.getOperation();

  SmallVector<Value, 4> res;
  res.reserve(op->getNumOperands());
  auto viewIteratorBegin = linalgOp.getInputsAndOutputBuffers().begin();
  for (unsigned viewIndex = 0; viewIndex < linalgOp.getNumInputsAndOutputs();
       ++viewIndex) {
    Value view = *(viewIteratorBegin + viewIndex);
    auto viewType = view.getType().cast<MemRefType>();
    unsigned rank = viewType.getRank();
    auto mapAttr = linalgOp.indexing_maps()[viewIndex];
    auto map = mapAttr.cast<AffineMapAttr>().getValue();
    // If the view is not tiled, we can use it as is.
    if (!isTiled(map, tileSizes)) {
      res.push_back(view);
      continue;
    }

    // Construct a new subview for the tile.
    SmallVector<Value, 4> offsets, sizes, strides;
    offsets.reserve(rank);
    sizes.reserve(rank);
    strides.reserve(rank);
    for (unsigned r = 0; r < rank; ++r) {
      if (!isTiled(map.getSubMap({r}), tileSizes)) {
        offsets.push_back(folded_std_constant_index(folder, 0));
        sizes.push_back(std_dim(view, r));
        strides.push_back(folded_std_constant_index(folder, 1));
        continue;
      }

      // Tiling creates a new slice at the proper index. The slice step is 1
      // (i.e. the slice view does not subsample; stepping occurs in the loop).
      auto m = map.getSubMap({r});
      auto offset = applyMapToValues(b, loc, m, lbs, folder).front();
      offsets.push_back(offset);
      auto size = applyMapToValues(b, loc, m, subViewSizes, folder).front();

      // The size of the subview should be trimmed to avoid out-of-bounds
      // accesses, unless we statically know the subview size divides the view
      // size evenly.
      int64_t viewSize = viewType.getDimSize(r);
      auto sizeCst = dyn_cast_or_null<ConstantIndexOp>(size.getDefiningOp());
      if (ShapedType::isDynamic(viewSize) || !sizeCst ||
          (viewSize % sizeCst.getValue()) != 0) {
        // Compute min(size, dim - offset) to avoid out-of-bounds accesses.
        auto minMap = AffineMap::get(
            /*dimCount=*/3, /*symbolCount=*/0,
            {getAffineDimExpr(/*position=*/0, b.getContext()),
             getAffineDimExpr(/*position=*/1, b.getContext()) -
                 getAffineDimExpr(/*position=*/2, b.getContext())});
        auto d = folded_std_dim(folder, view, r);
        size = folded_affine_min(folder, b.getIndexType(), minMap,
                                 ValueRange{size, d, offset});
      }

      sizes.push_back(size);
      strides.push_back(folded_std_constant_index(folder, 1));
    }

    res.push_back(b.create<SubViewOp>(loc, view, offsets, sizes, strides));
  }

  // Traverse the mins/maxes and erase those that don't have uses left.
  // This is a special type of folding that we only apply when `folder` is
  // defined.
  if (folder)
    for (auto v : llvm::concat<Value>(lbs, subViewSizes))
      if (v.use_empty())
        v.getDefiningOp()->erase();

  return res;
}
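// Boundary-trimming example for `makeTiledViews` above (made-up sizes): a view
// dimension of static size 50 tiled by 20 yields a subview size of
// affine.min(20, 50 - %iv); the last tile, at %iv == 40, is trimmed to 10.
// With tile size 25 the division is even and no affine.min is emitted.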
template <typename LoopTy>
static Optional<TiledLinalgOp> tileLinalgOpImpl(OpBuilder &b, LinalgOp op,
                                                ArrayRef<Value> tileSizes,
                                                ArrayRef<unsigned> permutation,
                                                OperationFolder *folder) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
  // 1. Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
  // adjusting affine maps to account for missing dimensions.
  assert(op.getNumParallelLoops() + op.getNumReductionLoops() +
                 op.getNumWindowLoops() ==
             tileSizes.size() &&
         "expected matching number of tile sizes and loops");

  if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
    // For conv ops, only tiling along the batch dimension (i.e. the first
    // loop) is supported.
    if (convOp.padding() &&
        !llvm::all_of(tileSizes.drop_front(),
                      [](Value val) { return isZero(val); }))
      return llvm::None;
  }

  // If the permutation is empty, use the identity map. Otherwise, build the
  // inverse of the permutation map.
  auto invPermutationMap = AffineMap::getMultiDimIdentityMap(
      tileSizes.size(), ScopedContext::getContext());
  if (!permutation.empty())
    invPermutationMap = inversePermutation(
        AffineMap::getPermutationMap(permutation, ScopedContext::getContext()));

  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);
  ScopedContext scope(b, op.getLoc());
  // 2. Build the tiled loop ranges.
  auto viewSizes = getViewSizes(b, op);
  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (asserted in the inverse calculation).
  auto mapsRange = op.indexing_maps().getAsRange<AffineMapAttr>();
  auto maps =
      functional::map([](AffineMapAttr a) { return a.getValue(); }, mapsRange);
  auto viewSizesToLoopsMap = inversePermutation(concatAffineMaps(maps));
  assert(viewSizesToLoopsMap && "expected invertible map");

  SmallVector<SubViewOp::Range, 4> loopRanges;
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  std::tie(loopRanges, loopIndexToRangeIndex) =
      makeTiledLoopRanges(b, scope.getLocation(), viewSizesToLoopsMap,
                          viewSizes, tileSizes, folder);
  if (!permutation.empty())
    applyPermutationToVector(loopRanges, permutation);

  // 3. Create the tiled loops.
  LinalgOp res = op;
  auto ivs = ValueHandle::makeIndexHandles(loopRanges.size());
  auto pivs = makeHandlePointers(MutableArrayRef<ValueHandle>(ivs));
  // Convert SubViewOp::Range to linalg_range.
  SmallVector<Value, 4> linalgRanges;
  for (auto &range : loopRanges) {
    linalgRanges.push_back(
        linalg_range(range.offset, range.size, range.stride));
  }
  GenericLoopNestRangeBuilder<LoopTy>(pivs, linalgRanges)([&] {
    auto b = ScopedContext::getBuilder();
    auto loc = ScopedContext::getLocation();
    SmallVector<Value, 4> ivValues(ivs.begin(), ivs.end());

    // If we have to apply a permutation to the tiled loop nest, we have to
    // reorder the induction variables. This permutation is the right one
    // assuming that loopRanges have previously been permuted by
    // (i,j,k)->(k,i,j), so it should be the inverse of that permutation:
    // (d0,d1,d2)->(d2,d0,d1).
    if (!permutation.empty())
      ivValues = applyMapToValues(b, loc, invPermutationMap, ivValues, folder);

    auto views =
        makeTiledViews(b, loc, op, ivValues, tileSizes, viewSizes, folder);
    auto operands = getAssumedNonViewOperands(op);
    views.append(operands.begin(), operands.end());
    res = op.clone(b, loc, views);
  });

  // 4. Transform the index arguments of `linalg.indexed_generic` w.r.t. the
  // tiling.
  transformIndexedGenericOpIndices(b, res, pivs, loopIndexToRangeIndex);

  // 5. Gather the newly created loops and return them with the new op.
  SmallVector<Operation *, 8> loops;
  loops.reserve(ivs.size());
  for (auto iv : ivs)
    loops.push_back(loop::getForInductionVarOwner(iv));

  return TiledLinalgOp{res, loops};
}
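// As an example of the overload below (hypothetical sizes): tiling a matmul
// (two parallel loops plus one reduction loop) with tileSizes = {8, 0}
// materializes constants 8 and 0, pads with a third zero for the reduction
// loop, and forwards to the Value-based overload above.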
template <typename LoopTy>
static Optional<TiledLinalgOp>
tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
                 ArrayRef<unsigned> permutation, OperationFolder *folder) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");
  if (tileSizes.empty())
    return llvm::None;

  // The following uses the convention that "tiling by zero" skips tiling a
  // particular dimension. This convention is significantly simpler to handle
  // than adjusting affine maps to account for missing dimensions.
  auto nLoops = op.getNumParallelLoops() + op.getNumReductionLoops() +
                op.getNumWindowLoops();
  tileSizes = tileSizes.take_front(nLoops);
  // If all remaining tile sizes are zero, there is nothing to tile; return.
  if (llvm::all_of(tileSizes, [](int64_t v) { return v == 0; }))
    return llvm::None;

  if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
    // For conv ops, only tiling along the batch dimension (i.e. the first
    // loop) is supported.
    if (convOp.padding() &&
        !llvm::all_of(tileSizes.drop_front(),
                      [](int64_t val) { return val == 0; }))
      return llvm::None;
  }

  // Create a builder for tile size constants.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);
  ScopedContext scope(b, op.getLoc());

  // Materialize concrete tile size values to pass to the generic tiling
  // function.
  SmallVector<Value, 8> tileSizeValues;
  tileSizeValues.reserve(tileSizes.size());
  for (auto ts : tileSizes)
    tileSizeValues.push_back(folded_std_constant_index(folder, ts));
  // Pad tile sizes with zero values to enforce our convention.
  if (tileSizeValues.size() < nLoops) {
    for (unsigned i = tileSizeValues.size(); i < nLoops; ++i)
      tileSizeValues.push_back(folded_std_constant_index(folder, 0));
  }

  return tileLinalgOpImpl<LoopTy>(b, op, tileSizeValues, permutation, folder);
}
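// A minimal usage sketch for the entry points below (hypothetical op and
// sizes; error handling elided). It mirrors the walk in `tileLinalgOps`
// further down: tile, then erase the original op on success.
//
//   OperationFolder folder(op.getContext());
//   if (tileLinalgOp(b, op, /*tileSizes=*/{32, 32, 8}, /*permutation=*/{},
//                    &folder))
//     op.erase();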
Optional<TiledLinalgOp>
mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
                           ArrayRef<unsigned> permutation,
                           OperationFolder *folder) {
  return tileLinalgOpImpl<loop::ForOp>(b, op, tileSizes, permutation, folder);
}

Optional<TiledLinalgOp> mlir::linalg::tileLinalgOpToParallelLoops(
    OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
    ArrayRef<unsigned> permutation, OperationFolder *folder) {
  return tileLinalgOpImpl<loop::ParallelOp>(b, op, tileSizes, permutation,
                                            folder);
}

Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
    OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
    ArrayRef<unsigned> permutation, OperationFolder *folder) {
  return tileLinalgOpImpl<loop::ForOp>(b, op, tileSizes, permutation, folder);
}

Optional<TiledLinalgOp> mlir::linalg::tileLinalgOpToParallelLoops(
    OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
    ArrayRef<unsigned> permutation, OperationFolder *folder) {
  return tileLinalgOpImpl<loop::ParallelOp>(b, op, tileSizes, permutation,
                                            folder);
}

template <typename LoopTy>
static void tileLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes) {
  OpBuilder b(f);
  OperationFolder folder(f.getContext());
  f.walk([tileSizes, &b, &folder](LinalgOp op) {
    if (!op.hasBufferSemantics())
      return;
    auto opLoopsPair =
        tileLinalgOpImpl<LoopTy>(b, op, tileSizes, /*permutation=*/{}, &folder);
    // If tiling occurred successfully, erase the old op.
    if (opLoopsPair)
      op.erase();
  });
  f.walk([](LinalgOp op) {
    if (isOpTriviallyDead(op))
      op.erase();
  });
}

namespace {
struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
  LinalgTilingPass() = default;
  LinalgTilingPass(ArrayRef<int64_t> sizes) { tileSizes = sizes; }

  void runOnFunction() override {
    tileLinalgOps<loop::ForOp>(getFunction(), tileSizes);
  }
};

struct LinalgTilingToParallelLoopsPass
    : public LinalgTilingToParallelLoopsBase<LinalgTilingToParallelLoopsPass> {
  LinalgTilingToParallelLoopsPass() = default;
  LinalgTilingToParallelLoopsPass(ArrayRef<int64_t> sizes) {
    tileSizes = sizes;
  }

  void runOnFunction() override {
    tileLinalgOps<loop::ParallelOp>(getFunction(), tileSizes);
  }
};

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes) {
  return std::make_unique<LinalgTilingPass>(tileSizes);
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingToParallelLoopsPass(ArrayRef<int64_t> tileSizes) {
  return std::make_unique<LinalgTilingToParallelLoopsPass>(tileSizes);
}
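// Pipeline usage sketch for the factory functions above (assuming a
// module-level pass manager; `module` is hypothetical):
//
//   PassManager pm(module.getContext());
//   pm.nest<FuncOp>().addPass(createLinalgTilingPass({2, 3, 4}));
//   if (failed(pm.run(module)))
//     ... // handle the failure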