//===- Tiling.cpp - Implementation of linalg Tiling -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Tiling pass.
//
//===----------------------------------------------------------------------===//

#include <utility>

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

#define DEBUG_TYPE "linalg-tiling"

static bool isZero(Value v) {
  if (auto cst = v.getDefiningOp<arith::ConstantIndexOp>())
    return cst.value() == 0;
  return false;
}

std::tuple<SmallVector<Range, 4>, LoopIndexToRangeIndexMap>
mlir::linalg::makeTiledLoopRanges(RewriterBase &b, Location loc, AffineMap map,
                                  ValueRange allShapeSizes,
                                  ValueRange allTileSizes) {
  assert(allTileSizes.size() == map.getNumResults());
  // Apply `map` to get shape sizes in loop order.
  auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
  SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());

  // Traverse the tile sizes, which are in loop order, and erase zeros
  // everywhere.
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  for (int idx = 0, e = tileSizes.size(), zerosCount = 0; idx < e; ++idx) {
    if (isZero(tileSizes[idx - zerosCount])) {
      shapeSizes.erase(shapeSizes.begin() + idx - zerosCount);
      tileSizes.erase(tileSizes.begin() + idx - zerosCount);
      ++zerosCount;
      continue;
    }
    loopIndexToRangeIndex[idx] = idx - zerosCount;
  }

  // Create a new range with the applied tile sizes.
  SmallVector<Range, 4> res;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx)
    res.push_back(Range{b.create<arith::ConstantIndexOp>(loc, 0),
                        shapeSizes[idx], tileSizes[idx]});
  return std::make_tuple(res, loopIndexToRangeIndex);
}

void mlir::linalg::transformIndexOps(
    RewriterBase &b, LinalgOp op, SmallVectorImpl<Value> &ivs,
    const LoopIndexToRangeIndexMap &loopIndexToRangeIndex) {
  SmallVector<Value> allIvs(op.getNumLoops(), nullptr);
  for (auto &en : enumerate(allIvs)) {
    auto rangeIndex = loopIndexToRangeIndex.find(en.index());
    if (rangeIndex == loopIndexToRangeIndex.end())
      continue;
    en.value() = ivs[rangeIndex->second];
  }
  addTileLoopIvsToIndexOpResults(b, op, allIvs);
}

// Insert a tile `source` into the destination tensor `dest`. The position at
// which the tile is inserted (as well as the size of the tile) is taken from a
// given ExtractSliceOp `sliceOp`.
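//
// For example (a sketch; SSA names and types are illustrative), if `sliceOp`
// is
//   %tile = tensor.extract_slice %src[%o0, %o1] [%s0, %s1] [1, 1]
//       : tensor<?x?xf32> to tensor<?x?xf32>
// then the created op reuses its offsets, sizes and strides:
//   %r = tensor.insert_slice %source into %dest[%o0, %o1] [%s0, %s1] [1, 1]
//       : tensor<?x?xf32> into tensor<?x?xf32>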
static Value insertSliceIntoTensor(RewriterBase &b, Location loc,
                                   tensor::ExtractSliceOp sliceOp, Value source,
                                   Value dest) {
  return b.create<tensor::InsertSliceOp>(
      loc, sliceOp.source().getType(), source, dest, sliceOp.offsets(),
      sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
      sliceOp.static_sizes(), sliceOp.static_strides());
}

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ValueRange tileSizes,
                 const LinalgTilingOptions &options) {
  auto nLoops = op.getNumLoops();
  // Initial tile sizes may be too big; only take the first nLoops.
  tileSizes = tileSizes.take_front(nLoops);

  if (llvm::all_of(tileSizes, isZero)) {
    TiledLinalgOp tiledOp;
    tiledOp.op = cast<LinalgOp>(b.clone(*op.getOperation()));
    tiledOp.tensorResults.assign(tiledOp.op->result_begin(),
                                 tiledOp.op->result_end());
    return tiledOp;
  }

  // 1. Build the tiled loop ranges.
  auto allShapeSizes = op.createFlatListOfOperandDims(b, op.getLoc());
  AffineMap shapeSizesToLoopsMap = op.getShapesToLoopsMap();
  if (!shapeSizesToLoopsMap)
    return failure();

  SmallVector<Range, 4> loopRanges;
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
      b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);

  SmallVector<Attribute, 4> iteratorTypes;
  for (const auto &attr :
       enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
    if (loopIndexToRangeIndex.count(attr.index()))
      iteratorTypes.push_back(attr.value());
  }
  // If interchangeVector is empty, use the identity. Otherwise, build the
  // permutation map.
  auto invPermutationMap =
      AffineMap::getMultiDimIdentityMap(tileSizes.size(), b.getContext());
  if (!options.interchangeVector.empty()) {
    // Based on the pruned iterations (due to zero tile sizes), recompute the
    // interchange vector.
    SmallVector<unsigned, 4> interchangeVector;
    interchangeVector.reserve(options.interchangeVector.size());
    for (auto pos : options.interchangeVector) {
      auto it = loopIndexToRangeIndex.find(pos);
      if (it == loopIndexToRangeIndex.end())
        continue;
      interchangeVector.push_back(it->second);
    }
    // The interchange vector is guaranteed to be a permutation, so
    // `inversePermutation` must succeed.
    invPermutationMap = inversePermutation(
        AffineMap::getPermutationMap(interchangeVector, b.getContext()));
    assert(invPermutationMap);
    SmallVector<int64_t> permutation(interchangeVector.begin(),
                                     interchangeVector.end());
    applyPermutationToVector(loopRanges, permutation);
    applyPermutationToVector(iteratorTypes, permutation);
  }

  // 2. Create the tiled loops.
  LinalgOp res = op;
  SmallVector<Value, 4> ivs, tensorResults;
  auto tiledLoopBodyBuilder =
      [&](OpBuilder &builder, Location loc, ValueRange localIvs,
          ValueRange operandValuesToUse) -> scf::ValueVector {
    ivs.assign(localIvs.begin(), localIvs.end());

    // When an `interchangeVector` is present, it has been applied to the
    // loop ranges and the iterator types. Apply its inverse to the
    // resulting loop `ivs` to match the op definition.
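    // For example, with `interchangeVector = [1, 0]` the loop ranges were
    // permuted to (j, i); applying the inverse permutation to the loop ivs
    // recovers the (i, j) order expected by the op's indexing maps.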
    SmallVector<Value, 4> interchangedIvs;
    if (!options.interchangeVector.empty())
      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
    else
      interchangedIvs.assign(ivs.begin(), ivs.end());

    // Tile the `operandValuesToUse` that either match the `op` operands
    // themselves or the tile loop arguments forwarding them.
    assert(operandValuesToUse.size() ==
               static_cast<size_t>(op.getNumInputsAndOutputs()) &&
           "expect the number of operands and inputs and outputs to match");
    SmallVector<Value> valuesToTile = operandValuesToUse;
    auto sizeBounds =
        applyMapToValues(b, loc, shapeSizesToLoopsMap, allShapeSizes);
    SmallVector<Value, 4> tiledOperands = makeTiledShapes(
        b, loc, op, valuesToTile, interchangedIvs, tileSizes, sizeBounds);

    // TODO: use an interface/adaptor to avoid leaking position in
    // `tiledOperands`.
    SmallVector<Type, 4> resultTensorTypes;
    for (OpOperand *opOperand : op.getOutputTensorOperands())
      resultTensorTypes.push_back(
          tiledOperands[opOperand->getOperandNumber()].getType());

    res = op.clone(b, loc, resultTensorTypes, tiledOperands);

    // Insert an insert_slice for each output tensor.
    unsigned resultIdx = 0;
    for (OpOperand *opOperand : op.getOutputTensorOperands()) {
      // TODO: use an interface/adaptor to avoid leaking position in
      // `tiledOperands`.
      Value outputTensor = tiledOperands[opOperand->getOperandNumber()];
      // TODO: Propagate RewriterBase everywhere.
      IRRewriter rewriter(b);
      if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
        tensorResults.push_back(insertSliceIntoTensor(rewriter, loc, sliceOp,
                                                      res->getResult(resultIdx),
                                                      sliceOp.source()));
      } else {
        tensorResults.push_back(res->getResult(resultIdx));
      }
      ++resultIdx;
    }
    return scf::ValueVector(tensorResults.begin(), tensorResults.end());
  };
  GenerateLoopNest<LoopTy>::doit(b, op.getLoc(), loopRanges, op, iteratorTypes,
                                 tiledLoopBodyBuilder, options.distribution,
                                 options.distributionTypes);

  // 3. Transform IndexOp results w.r.t. the tiling.
  transformIndexOps(b, res, ivs, loopIndexToRangeIndex);

  // 4. Gather the newly created loops and return them with the new op.
  SmallVector<Operation *, 8> loops;
  loops.reserve(ivs.size());
  for (auto iv : ivs) {
    if (iv.isa<BlockArgument>()) {
      loops.push_back(iv.cast<BlockArgument>().getOwner()->getParentOp());
      assert(loops.back() && "no owner found for induction variable!");
    } else {
      // TODO: Instead of doing this, try to recover the ops used instead of
      // the loop.
      loops.push_back(nullptr);
    }
  }

  // 5. Get the tensor results from the outermost loop if available. Otherwise
  // use the previously captured `tensorResults`.
  Operation *outermostLoop = nullptr;
  for (Operation *loop : loops)
    if ((outermostLoop = loop))
      break;

  return TiledLinalgOp{
      res, loops, outermostLoop ? outermostLoop->getResults() : tensorResults};
}

template <typename LoopTy>
static FailureOr<TiledLinalgOp>
tileLinalgOpImpl(RewriterBase &b, LinalgOp op,
                 const LinalgTilingOptions &options) {
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);

  if (!options.tileSizeComputationFunction)
    return failure();

  // Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
  // adjusting affine maps to account for missing dimensions.
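  // For example, linalg.matmul has three loops (i, j, k); if the computed
  // tile sizes are {2, 4}, the vector is padded below to {2, 4, 0} and the
  // reduction loop k is left untiled.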
  auto nLoops = op.getNumLoops();
  SmallVector<Value, 4> tileSizeVector =
      options.tileSizeComputationFunction(b, op);
  if (tileSizeVector.size() < nLoops) {
    auto zero = b.create<arith::ConstantIndexOp>(op.getLoc(), 0);
    tileSizeVector.append(nLoops - tileSizeVector.size(), zero);
  }

  return tileLinalgOpImpl<LoopTy>(b, op, tileSizeVector, options);
}

FailureOr<TiledLinalgOp>
mlir::linalg::tileLinalgOp(RewriterBase &b, LinalgOp op,
                           const LinalgTilingOptions &options) {
  switch (options.loopType) {
  case LinalgTilingLoopType::Loops:
    return tileLinalgOpImpl<scf::ForOp>(b, op, options);
  case LinalgTilingLoopType::ParallelLoops:
    return tileLinalgOpImpl<scf::ParallelOp>(b, op, options);
  default:;
  }
  return failure();
}

/// Generate a loop nest around a given tensor::PadOp (for tiling). `newPadOp`
/// and `loopNest` are output parameters that return the new (tiled)
/// tensor::PadOp and the loop nest.
static LogicalResult tilePadOp(RewriterBase &builder, tensor::PadOp op,
                               tensor::PadOp &newPadOp, LoopNest &loopNest,
                               const LinalgTilingOptions &options) {
  Location loc = op.getLoc();
  OpBuilder::InsertionGuard g(builder);
  builder.setInsertionPoint(op);

  // Clone tensor::PadOp so that the existing op can be replaced more easily.
  newPadOp = cast<tensor::PadOp>(builder.clone(*op.getOperation()));
  // Get rank and tile sizes.
  int64_t rank = op.getResultType().getRank();
  SmallVector<Value> tileSizes =
      options.tileSizeComputationFunction(builder, op);
  // Normalize untiled padding dimensions to 0.
  Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
  tileSizes.append(rank - tileSizes.size(), zero);
  // Compute lower and upper bounds of the loop nest.
  TilingInterface tilingInterface =
      dyn_cast<TilingInterface>(op.getOperation());
  SmallVector<Range> ranges = tilingInterface.getIterationDomain(builder);
  SmallVector<Value> lbs, dims, allDims, steps;
  for (int64_t i = 0; i < rank; ++i) {
    allDims.push_back(ranges[i].size);
    if (!isZero(tileSizes[i])) {
      lbs.push_back(ranges[i].offset);
      dims.push_back(ranges[i].size);
      steps.push_back(tileSizes[i]);
    }
  }
  // Generate loop nest: one loop per dimension.
  SmallVector<Value> destOperand =
      tilingInterface.getDestinationOperands(builder);
  loopNest = mlir::scf::buildLoopNest(
      builder, loc, lbs, /*ubs=*/dims, steps, ValueRange(destOperand),
      [&](OpBuilder &b, Location loc, ValueRange localIvs,
          ValueRange iterArgs) -> scf::ValueVector {
        // Compute offsets and sizes of ExtractSliceOp.
        SmallVector<Value> offsets =
            computeTileOffsets(b, loc, localIvs, tileSizes);
        SmallVector<Value> sizes =
            computeTileSizes(b, loc, localIvs, tileSizes, allDims);
        // Create ExtractSliceOp: extract a tile from the tensor::PadOp.
        // Note: The tensor::PadOp is located outside of the loop nest. It is
        // later moved inside by ExtractSliceOfPadTensorSwapPattern.
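        // (A sketch, with illustrative SSA names: that pattern rewrites
        //    %pad = tensor.pad %src ...
        //    %tile = tensor.extract_slice %pad[...] [...] [...]
        //  into a tensor.pad of a tensor.extract_slice of %src, so that only
        //  the needed tile is padded.)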
        auto map = AffineMap::getMultiDimIdentityMap(rank, b.getContext());
        Value tiledOutput =
            makeTiledShape(b, loc, newPadOp->getResult(0), tileSizes, map,
                           offsets, allDims, sizes);
        auto sliceOp = tiledOutput.getDefiningOp<tensor::ExtractSliceOp>();
        assert(sliceOp && "expected ExtractSliceOp");
        // Insert the tile into the output tensor.
        // TODO: Propagate RewriterBase everywhere.
        IRRewriter rewriter(b);
        Value yieldValue =
            insertSliceIntoTensor(rewriter, loc, sliceOp, sliceOp, iterArgs[0]);
        return scf::ValueVector({yieldValue});
      });
  return success();
}

namespace {
struct PadOpTilingPattern : public OpRewritePattern<tensor::PadOp> {
  PadOpTilingPattern(MLIRContext *ctx, LinalgTilingOptions opt)
      : OpRewritePattern<tensor::PadOp>(ctx), options(std::move(opt)) {}

  LogicalResult matchAndRewrite(tensor::PadOp op,
                                PatternRewriter &rewriter) const override {
    if (op->hasAttr(LinalgTransforms::kLinalgTransformMarker))
      return failure();
    tensor::PadOp newPadOp;
    LoopNest loopNest;
    if (failed(tilePadOp(rewriter, op, newPadOp, loopNest, options)))
      return failure();
    newPadOp->setAttr(LinalgTransforms::kLinalgTransformMarker,
                      rewriter.getUnitAttr());
    // Replace all uses of the original tensor::PadOp.
    rewriter.replaceOp(op, loopNest.getResults()[0]);
    return success();
  }

  LinalgTilingOptions options;
};
} // namespace
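// A small illustration of the type-list expansion below (with hypothetical
// ops FooOp and BarOp): `CanonicalizationPatternList<FooOp, BarOp>::insert(
// patterns)` expands to `FooOp::getCanonicalizationPatterns(patterns, ctx)`
// followed by `BarOp::getCanonicalizationPatterns(patterns, ctx)`, with the
// recursion terminating at the empty specialization.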
namespace {
/// Helper classes for type list expansion.
template <typename... OpTypes>
class CanonicalizationPatternList;

template <>
class CanonicalizationPatternList<> {
public:
  static void insert(RewritePatternSet &patterns) {}
};

template <typename OpTy, typename... OpTypes>
class CanonicalizationPatternList<OpTy, OpTypes...> {
public:
  static void insert(RewritePatternSet &patterns) {
    OpTy::getCanonicalizationPatterns(patterns, patterns.getContext());
    CanonicalizationPatternList<OpTypes...>::insert(patterns);
  }
};
} // namespace

RewritePatternSet
mlir::linalg::getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx) {
  RewritePatternSet patterns(ctx);
  populateLinalgTilingCanonicalizationPatterns(patterns);
  return patterns;
}

void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
    RewritePatternSet &patterns) {
  auto *ctx = patterns.getContext();
  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
  arith::ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);

  memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);
  memref::ViewOp::getCanonicalizationPatterns(patterns, ctx);

  scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
  scf::ParallelOp::getCanonicalizationPatterns(patterns, ctx);

  tensor::CastOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::ExtractSliceOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::InsertSliceOp::getCanonicalizationPatterns(patterns, ctx);

  InitTensorOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::PadOp::getCanonicalizationPatterns(patterns, ctx);
  ctx->getLoadedDialect<LinalgDialect>()->getCanonicalizationPatterns(patterns);

  CanonicalizationPatternList<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
      >::insert(patterns);
}
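// A minimal usage sketch (assuming a `FuncOp funcOp` is in scope), mirroring
// how the pass below consumes these patterns:
//   (void)applyPatternsAndFoldGreedily(
//       funcOp, getLinalgTilingCanonicalizationPatterns(funcOp.getContext()));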
/// Populate the given list with patterns that apply Linalg tiling.
static void insertTilingPatterns(RewritePatternSet &patterns,
                                 const LinalgTilingOptions &options) {
  auto *ctx = patterns.getContext();
  LinalgTransformationFilter f(ArrayRef<StringAttr>{},
                               StringAttr::get(ctx, "tiled"));
  TilingPatterns<GenericOp,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                 >::insert(patterns, options, f);
  patterns.add<PadOpTilingPattern>(ctx, options);
}

void mlir::linalg::populatePadTensorTilingPatterns(
    RewritePatternSet &patterns, const LinalgTilingOptions &options) {
  auto *ctx = patterns.getContext();
  patterns.add<PadOpTilingPattern>(ctx, options);
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
  MLIRContext *ctx = funcOp.getContext();
  RewritePatternSet patterns(ctx);
  patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  (void)applyPatternsAndFoldGreedily(
      funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
}

namespace {
struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
  LinalgTilingPass() = default;
  LinalgTilingPass(ArrayRef<int64_t> tileSizes, LinalgTilingLoopType loopType) {
    this->tileSizes = tileSizes;
    this->loopType = "";
    this->loopTypeEnum = loopType;
  }

  void runOnOperation() override {
    FuncOp funcOp = getOperation();
    LinalgTilingLoopType type =
        llvm::StringSwitch<LinalgTilingLoopType>(loopType)
            .Case("for", LinalgTilingLoopType::Loops)
            .Case("affine", LinalgTilingLoopType::AffineLoops)
            .Case("parallel", LinalgTilingLoopType::ParallelLoops)
            .Default(loopTypeEnum);
    auto options =
        LinalgTilingOptions().setTileSizes(tileSizes).setLoopType(type);
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    insertTilingPatterns(patterns, options);
    scf::populateSCFForLoopCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
    (void)applyPatternsAndFoldGreedily(
        funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
    // Drop the marker.
    funcOp.walk([](LinalgOp op) {
      op->removeAttr(LinalgTransforms::kLinalgTransformMarker);
    });

    // Apply the swap pattern after generating the loop nest and running
    // canonicalizations.
    applyExtractSliceOfPadTensorSwapPattern(funcOp);
  }

  LinalgTilingLoopType loopTypeEnum;
};

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
                             linalg::LinalgTilingLoopType loopType) {
  return std::make_unique<LinalgTilingPass>(tileSizes, loopType);
}
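// Usage sketch: the pass can be constructed programmatically, e.g.
//   pm.addNestedPass<FuncOp>(
//       createLinalgTilingPass({2, 4}, LinalgTilingLoopType::Loops));
// or driven from the command line, e.g.
//   mlir-opt -linalg-tile="tile-sizes=2,4 loop-type=for" ...
// (flag names follow the pass's tablegen definition; see Linalg Passes.td).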