1 //===- TestLinalgTransforms.cpp - Test Linalg transformation patterns -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements logic for testing Linalg transformations. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "mlir/Dialect/Affine/IR/AffineOps.h" 14 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" 15 #include "mlir/Dialect/GPU/GPUDialect.h" 16 #include "mlir/Dialect/Linalg/IR/Linalg.h" 17 #include "mlir/Dialect/Linalg/Passes.h" 18 #include "mlir/Dialect/Linalg/Transforms/HoistPadding.h" 19 #include "mlir/Dialect/Linalg/Transforms/Hoisting.h" 20 #include "mlir/Dialect/Linalg/Transforms/Transforms.h" 21 #include "mlir/Dialect/Linalg/Utils/Utils.h" 22 #include "mlir/Dialect/StandardOps/IR/Ops.h" 23 #include "mlir/Dialect/Vector/IR/VectorOps.h" 24 #include "mlir/Pass/PassManager.h" 25 #include "mlir/Transforms/GreedyPatternRewriteDriver.h" 26 27 #include "llvm/ADT/SetVector.h" 28 #include "llvm/ADT/SmallVector.h" 29 30 using namespace mlir; 31 using namespace mlir::linalg; 32 33 namespace { 34 struct TestLinalgTransforms 35 : public PassWrapper<TestLinalgTransforms, OperationPass<FuncOp>> { 36 TestLinalgTransforms() = default; 37 TestLinalgTransforms(const TestLinalgTransforms &pass) : PassWrapper(pass) {} 38 39 void getDependentDialects(DialectRegistry ®istry) const override { 40 // clang-format off 41 registry.insert<AffineDialect, 42 memref::MemRefDialect, 43 scf::SCFDialect, 44 StandardOpsDialect, 45 linalg::LinalgDialect, 46 vector::VectorDialect, 47 gpu::GPUDialect>(); 48 // clang-format on 49 } 50 StringRef getArgument() const final { 51 return "test-linalg-transform-patterns"; 52 } 53 StringRef getDescription() const final { 54 return "Test Linalg transformation patterns by applying them greedily."; 55 } 56 57 void runOnOperation() override; 58 59 Option<bool> testPatterns{*this, "test-patterns", 60 llvm::cl::desc("Test a mixed set of patterns"), 61 llvm::cl::init(false)}; 62 Option<bool> testMatmulToVectorPatterns1dTiling{ 63 *this, "test-matmul-to-vector-patterns-tile-1d", 64 llvm::cl::desc( 65 "Test a fused pass that applies patterns from matmul to vectors via " 66 "1-d tiling"), 67 llvm::cl::init(false)}; 68 Option<bool> testMatmulToVectorPatterns2dTiling{ 69 *this, "test-matmul-to-vector-patterns-tile-2d", 70 llvm::cl::desc( 71 "Test a fused pass that applies patterns from matmul to vectors via " 72 "2-d tiling"), 73 llvm::cl::init(false)}; 74 Option<bool> testPromotionOptions{*this, "test-linalg-promotion-options", 75 llvm::cl::desc("Test promotion options"), 76 llvm::cl::init(false)}; 77 Option<bool> testTileAndDistributionOptions{ 78 *this, "test-tile-and-distribute-options", 79 llvm::cl::desc("Test tile and distribute options"), 80 llvm::cl::init(false)}; 81 Option<bool> testVectorTransferForwardingPatterns{ 82 *this, "test-vector-transfer-forwarding-patterns", 83 llvm::cl::desc( 84 "Test a fused pass that forwards memref.copy to vector.transfer"), 85 llvm::cl::init(false)}; 86 Option<bool> testGenericToVectorPattern{ 87 *this, "test-linalg-to-vector-patterns", 88 llvm::cl::desc("Test a set of patterns that rewrite a linalg contraction " 89 "in vector.contract form"), 90 llvm::cl::init(false)}; 91 Option<bool> testTilePattern{*this, "test-tile-pattern", 92 llvm::cl::desc("Test tile pattern"), 93 llvm::cl::init(false)}; 94 Option<bool> testTileScalarizeDynamicDims{ 95 *this, "test-tile-scalarize-dynamic-dims", 96 llvm::cl::desc("Test tiling of dynamic dims by 1"), 97 llvm::cl::init(false)}; 98 Option<bool> testTransformPadTensor{ 99 *this, "test-transform-pad-tensor", 100 llvm::cl::desc("Test transform pad tensor by copying with generic ops"), 101 llvm::cl::init(false)}; 102 Option<bool> testGeneralizePadTensor{ 103 *this, "test-generalize-pad-tensor", 104 llvm::cl::desc("Test transform pad tensor by copying with generic ops"), 105 llvm::cl::init(false)}; 106 Option<bool> testSwapSubTensorPadTensor{ 107 *this, "test-swap-subtensor-padtensor", 108 llvm::cl::desc("Test rewrite of subtensor(pad_tensor) into " 109 "pad_tensor(subtensor)"), 110 llvm::cl::init(false)}; 111 ListOption<int64_t> peeledLoops{ 112 *this, "peeled-loops", 113 llvm::cl::desc("Loops to be peeled when test-tile-pattern"), 114 llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated}; 115 ListOption<int64_t> tileSizes{ 116 *this, "tile-sizes", 117 llvm::cl::desc("Linalg tile sizes for test-tile-pattern"), 118 llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated}; 119 ListOption<unsigned> testTiledLoopPeeling{ 120 *this, "test-tiled-loop-peeling", 121 llvm::cl::desc("Test peeling of linalg.tiled_loop ops"), 122 llvm::cl::OneOrMore, llvm::cl::MiscFlags::CommaSeparated}; 123 Option<bool> skipPartial{ 124 *this, "skip-partial", 125 llvm::cl::desc("Skip loops inside partial iterations during peeling"), 126 llvm::cl::init(false)}; 127 Option<std::string> loopType{ 128 *this, "loop-type", 129 llvm::cl::desc("Specify the type of loops to generate: for, parallel or " 130 "tiled_loop"), 131 llvm::cl::init("for")}; 132 }; 133 } // namespace 134 135 static void applyPatterns(FuncOp funcOp) { 136 MLIRContext *ctx = funcOp.getContext(); 137 RewritePatternSet patterns(ctx); 138 139 //===--------------------------------------------------------------------===// 140 // Linalg tiling patterns. 141 //===--------------------------------------------------------------------===// 142 patterns.add<LinalgTilingPattern>( 143 MatmulOp::getOperationName(), ctx, 144 LinalgTilingOptions().setTileSizes({2000, 3000, 4000}), 145 LinalgTransformationFilter(StringAttr::get(ctx, "MEM"), 146 StringAttr::get(ctx, "L3"))); 147 patterns.add<LinalgTilingPattern>( 148 MatmulOp::getOperationName(), ctx, 149 LinalgTilingOptions().setTileSizes({200, 300, 400}), 150 LinalgTransformationFilter(StringAttr::get(ctx, "L3"), 151 StringAttr::get(ctx, "L2"))); 152 patterns.add<LinalgTilingPattern>( 153 MatmulOp::getOperationName(), ctx, 154 LinalgTilingOptions().setTileSizes({20, 30, 40}), 155 LinalgTransformationFilter(StringAttr::get(ctx, "L2"), 156 StringAttr::get(ctx, "L1"))); 157 patterns.add<LinalgTilingPattern>( 158 MatmulOp::getOperationName(), ctx, 159 LinalgTilingOptions().setTileSizes({2, 3, 4}), 160 LinalgTransformationFilter(StringAttr::get(ctx, "L1"), 161 StringAttr::get(ctx, "REG"))); 162 163 patterns.add<LinalgTilingPattern>( 164 MatvecOp::getOperationName(), ctx, 165 LinalgTilingOptions().setTileSizes({5, 6}).setLoopType( 166 LinalgTilingLoopType::ParallelLoops), 167 LinalgTransformationFilter(ArrayRef<StringAttr>{}, 168 StringAttr::get(ctx, "L1"))); 169 170 patterns.add<LinalgTilingPattern>( 171 DotOp::getOperationName(), ctx, LinalgTilingOptions().setTileSizes(8000), 172 LinalgTransformationFilter( 173 ArrayRef<StringAttr>{StringAttr::get(ctx, "MEM"), 174 StringAttr::get(ctx, "L3"), 175 StringAttr::get(ctx, "L2")}, 176 StringAttr::get(ctx, "REG"))); 177 178 //===--------------------------------------------------------------------===// 179 // Linalg tiling and permutation patterns. 180 //===--------------------------------------------------------------------===// 181 patterns.add<LinalgTilingPattern>( 182 MatmulOp::getOperationName(), ctx, 183 LinalgTilingOptions() 184 .setTileSizes({2000, 3000, 4000}) 185 .setInterchange({1, 2, 0}), 186 LinalgTransformationFilter(StringAttr::get(ctx, "__with_perm__"), 187 StringAttr::get(ctx, "L2__with_perm__"))); 188 patterns.add<LinalgTilingPattern>( 189 MatmulOp::getOperationName(), ctx, 190 LinalgTilingOptions() 191 .setTileSizes({200, 300, 400}) 192 .setInterchange({1, 0, 2}), 193 LinalgTransformationFilter(StringAttr::get(ctx, "L2__with_perm__"), 194 StringAttr::get(ctx, "L1__with_perm__"))); 195 patterns.add<LinalgTilingPattern>( 196 MatmulOp::getOperationName(), ctx, 197 LinalgTilingOptions().setTileSizes({20, 30, 40}), 198 LinalgTransformationFilter(StringAttr::get(ctx, "L1__with_perm__"), 199 StringAttr::get(ctx, "REG__with_perm__"))); 200 201 patterns.add<LinalgTilingPattern>( 202 MatvecOp::getOperationName(), ctx, 203 LinalgTilingOptions().setTileSizes({5, 6}).setInterchange({1, 0}), 204 LinalgTransformationFilter(StringAttr::get(ctx, "__with_perm__"), 205 StringAttr::get(ctx, "L1__with_perm__"))); 206 207 patterns.add<LinalgTilingPattern>( 208 MatmulOp::getOperationName(), ctx, 209 LinalgTilingOptions() 210 .setTileSizes({16, 8, 4}) 211 .setInterchange({1, 2, 0}) 212 .setLoopType(LinalgTilingLoopType::ParallelLoops), 213 LinalgTransformationFilter( 214 StringAttr::get(ctx, "par__with_perm__"), 215 StringAttr::get(ctx, "after_par__with_perm__"))); 216 217 //===--------------------------------------------------------------------===// 218 // Linalg to loops patterns. 219 //===--------------------------------------------------------------------===// 220 patterns.add<LinalgLoweringPattern<DotOp>>( 221 ctx, 222 /*loweringType=*/LinalgLoweringType::Loops, 223 LinalgTransformationFilter(StringAttr::get(ctx, "REG"))); 224 225 //===--------------------------------------------------------------------===// 226 // Linalg distribution patterns. 227 //===--------------------------------------------------------------------===// 228 LinalgLoopDistributionOptions distributionOptions; 229 230 //===--------------------------------------------------------------------===// 231 // Linalg to vector contraction patterns. 232 //===--------------------------------------------------------------------===// 233 patterns.add<LinalgVectorizationPattern>( 234 ctx, LinalgTransformationFilter(StringAttr::get(ctx, "VECTORIZE")) 235 .addOpFilter<MatmulOp, FillOp, GenericOp>()); 236 patterns.add<CopyVectorizationPattern>(ctx); 237 238 //===--------------------------------------------------------------------===// 239 // Linalg generic interchange pattern. 240 //===--------------------------------------------------------------------===// 241 patterns.add<GenericOpInterchangePattern>( 242 ctx, 243 /*interchangeVector=*/ArrayRef<unsigned>{1, 2, 0}, 244 LinalgTransformationFilter(ArrayRef<StringAttr>{}, 245 StringAttr::get(ctx, "PERMUTED"))); 246 247 //===--------------------------------------------------------------------===// 248 // Linalg subview operands promotion. 249 //===--------------------------------------------------------------------===// 250 patterns.add<LinalgPromotionPattern<MatmulOp>>( 251 ctx, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true), 252 LinalgTransformationFilter(StringAttr::get(ctx, "_promote_views_"), 253 StringAttr::get(ctx, "_views_promoted_"))); 254 patterns.add<LinalgPromotionPattern<MatmulOp>>( 255 ctx, 256 LinalgPromotionOptions() 257 .setOperandsToPromote({0}) 258 .setUseFullTileBuffersByDefault(true), 259 LinalgTransformationFilter( 260 StringAttr::get(ctx, "_promote_first_view_"), 261 StringAttr::get(ctx, "_first_view_promoted_"))); 262 patterns.add<LinalgPromotionPattern<FillOp>>( 263 ctx, 264 LinalgPromotionOptions() 265 .setOperandsToPromote({1}) 266 .setUseFullTileBuffers({false, true}) 267 .setAlignment(32), 268 LinalgTransformationFilter( 269 StringAttr::get(ctx, "_promote_views_aligned_"), 270 StringAttr::get(ctx, "_views_aligned_promoted_"))); 271 272 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 273 274 // Drop the marker. 275 funcOp.walk([](LinalgOp op) { 276 op->removeAttr(LinalgTransforms::kLinalgTransformMarker); 277 }); 278 } 279 280 static void fillL1TilingAndMatmulToVectorPatterns( 281 FuncOp funcOp, StringRef startMarker, 282 SmallVectorImpl<RewritePatternSet> &patternsVector) { 283 MLIRContext *ctx = funcOp.getContext(); 284 patternsVector.emplace_back( 285 ctx, std::make_unique<LinalgTilingPattern>( 286 MatmulOp::getOperationName(), ctx, 287 LinalgTilingOptions() 288 .setTileSizes({8, 12, 16}) 289 .setInterchange({1, 0, 2}), 290 LinalgTransformationFilter(StringAttr::get(ctx, startMarker), 291 StringAttr::get(ctx, "L1")))); 292 293 patternsVector.emplace_back( 294 ctx, 295 std::make_unique<LinalgPromotionPattern<MatmulOp>>( 296 ctx, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true), 297 LinalgTransformationFilter(StringAttr::get(ctx, "L1"), 298 StringAttr::get(ctx, "VEC")))); 299 300 patternsVector.emplace_back( 301 ctx, std::make_unique<LinalgVectorizationPattern>( 302 MatmulOp::getOperationName(), ctx, LinalgVectorizationOptions(), 303 LinalgTransformationFilter(StringAttr::get(ctx, "VEC")))); 304 patternsVector.back().add<LinalgVectorizationPattern>( 305 ctx, LinalgTransformationFilter().addOpFilter<FillOp>()); 306 patternsVector.back().add<CopyVectorizationPattern>(ctx); 307 } 308 309 //===----------------------------------------------------------------------===// 310 // Test promotion callbacks 311 //===----------------------------------------------------------------------===// 312 313 // Allocation call back 314 static Optional<Value> allocCallBackFn(OpBuilder &b, memref::SubViewOp subView, 315 ArrayRef<Value> boundingSubViewSize, 316 DataLayout &layout) { 317 SmallVector<int64_t, 4> shape(boundingSubViewSize.size(), -1); 318 return b 319 .create<memref::AllocOp>( 320 subView.getLoc(), 321 MemRefType::get(shape, subView.getType().getElementType(), 322 /*affineMapComposition =*/{}, 3), 323 boundingSubViewSize) 324 .getResult(); 325 } 326 327 // Deallocation callback 328 static LogicalResult deallocCallBackFn(OpBuilder &b, Value buffer) { 329 b.create<memref::DeallocOp>(buffer.getLoc(), buffer); 330 return success(); 331 } 332 333 // Copy in call back 334 static LogicalResult copyCallBackFn(OpBuilder &b, Value src, Value dst, 335 bool isOutput) { 336 auto floatType = src.getType().cast<MemRefType>().getElementType(); 337 if (!floatType.isa<FloatType>()) 338 return failure(); 339 if (!isOutput) { 340 Value cst = b.create<arith::ConstantOp>(src.getLoc(), 341 FloatAttr::get(floatType, 42.0)); 342 b.create<FillOp>(src.getLoc(), cst, dst); 343 } 344 b.create<memref::CopyOp>(src.getLoc(), src, dst); 345 return success(); 346 } 347 348 static void fillPromotionCallBackPatterns(MLIRContext *ctx, 349 RewritePatternSet &patterns) { 350 patterns.add<LinalgTilingPattern>( 351 MatmulOp::getOperationName(), ctx, 352 LinalgTilingOptions().setTileSizes({16, 16, 16}), 353 LinalgTransformationFilter(StringAttr::get(ctx, "START"), 354 StringAttr::get(ctx, "PROMOTE"))); 355 patterns.add<LinalgPromotionPattern<MatmulOp>>( 356 ctx, 357 LinalgPromotionOptions() 358 .setOperandsToPromote({0, 2}) 359 .setUseFullTileBuffers({false, false}) 360 .setAllocationDeallocationFns(allocCallBackFn, deallocCallBackFn) 361 .setCopyInOutFns( 362 [](OpBuilder &b, Value src, Value dst) -> LogicalResult { 363 return copyCallBackFn(b, src, dst, false); 364 }, 365 [](OpBuilder &b, Value src, Value dst) -> LogicalResult { 366 return copyCallBackFn(b, src, dst, true); 367 }), 368 LinalgTransformationFilter(StringAttr::get(ctx, "PROMOTE"))); 369 } 370 371 template <typename IdOp, typename NProcsOp> 372 static SmallVector<ProcInfo, 2> 373 getGpuProcIds(OpBuilder &b, Location loc, ArrayRef<Range> parallelLoopRanges) { 374 size_t count = std::min<size_t>(3, parallelLoopRanges.size()); 375 SmallVector<ProcInfo, 2> procInfo(count); 376 Type indexType = b.getIndexType(); 377 for (unsigned i = 0; i < count; ++i) { 378 gpu::Dimension dim = *gpu::symbolizeDimension(i); 379 procInfo[count - 1 - i] = {b.create<IdOp>(loc, indexType, dim), 380 b.create<NProcsOp>(loc, indexType, dim)}; 381 } 382 return procInfo; 383 } 384 385 static void fillTileAndDistributePatterns(MLIRContext *context, 386 RewritePatternSet &patterns) { 387 { 388 LinalgLoopDistributionOptions cyclicNprocsEqNiters; 389 cyclicNprocsEqNiters.distributionMethod.resize( 390 2, DistributionMethod::CyclicNumProcsEqNumIters); 391 cyclicNprocsEqNiters.procInfo = 392 getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 393 patterns.add<LinalgTilingPattern>( 394 MatmulOp::getOperationName(), context, 395 LinalgTilingOptions() 396 .setTileSizes({8, 8, 4}) 397 .setLoopType(LinalgTilingLoopType::ParallelLoops) 398 .setDistributionOptions(cyclicNprocsEqNiters), 399 LinalgTransformationFilter( 400 StringAttr::get(context, "distribute1"), 401 StringAttr::get(context, "after_distribute1"))); 402 } 403 404 { 405 LinalgLoopDistributionOptions cyclicNprocsGeNiters; 406 cyclicNprocsGeNiters.distributionMethod.resize( 407 2, DistributionMethod::CyclicNumProcsGeNumIters); 408 cyclicNprocsGeNiters.procInfo = 409 getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 410 patterns.add<LinalgTilingPattern>( 411 MatmulOp::getOperationName(), context, 412 LinalgTilingOptions() 413 .setTileSizes({8, 8, 4}) 414 .setLoopType(LinalgTilingLoopType::ParallelLoops) 415 .setDistributionOptions(cyclicNprocsGeNiters), 416 LinalgTransformationFilter( 417 StringAttr::get(context, "distribute2"), 418 StringAttr::get(context, "after_distribute2"))); 419 } 420 421 { 422 LinalgLoopDistributionOptions cyclicNprocsDefault; 423 cyclicNprocsDefault.distributionMethod.resize(2, 424 DistributionMethod::Cyclic); 425 cyclicNprocsDefault.procInfo = 426 getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 427 patterns.add<LinalgTilingPattern>( 428 MatmulOp::getOperationName(), context, 429 LinalgTilingOptions() 430 .setTileSizes({8, 8, 4}) 431 .setLoopType(LinalgTilingLoopType::ParallelLoops) 432 .setDistributionOptions(cyclicNprocsDefault), 433 LinalgTransformationFilter( 434 StringAttr::get(context, "distribute3"), 435 StringAttr::get(context, "after_distribute3"))); 436 } 437 438 { 439 LinalgLoopDistributionOptions cyclicNprocsMixed1; 440 cyclicNprocsMixed1.distributionMethod = { 441 DistributionMethod::CyclicNumProcsEqNumIters, 442 DistributionMethod::CyclicNumProcsGeNumIters}; 443 cyclicNprocsMixed1.procInfo = getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 444 patterns.add<LinalgTilingPattern>( 445 MatmulOp::getOperationName(), context, 446 LinalgTilingOptions() 447 .setTileSizes({8, 8, 4}) 448 .setLoopType(LinalgTilingLoopType::ParallelLoops) 449 .setDistributionOptions(cyclicNprocsMixed1), 450 LinalgTransformationFilter( 451 StringAttr::get(context, "distribute4"), 452 StringAttr::get(context, "after_distribute4"))); 453 } 454 455 { 456 LinalgLoopDistributionOptions cyclicNprocsMixed2; 457 cyclicNprocsMixed2.distributionMethod = { 458 DistributionMethod::CyclicNumProcsGeNumIters, 459 DistributionMethod::Cyclic}; 460 cyclicNprocsMixed2.procInfo = getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 461 patterns.add<LinalgTilingPattern>( 462 MatmulOp::getOperationName(), context, 463 LinalgTilingOptions() 464 .setTileSizes({8, 8, 4}) 465 .setLoopType(LinalgTilingLoopType::ParallelLoops) 466 .setDistributionOptions(cyclicNprocsMixed2), 467 LinalgTransformationFilter( 468 StringAttr::get(context, "distribute5"), 469 StringAttr::get(context, "after_distribute5"))); 470 } 471 472 { 473 LinalgLoopDistributionOptions cyclicNprocsMixed3; 474 cyclicNprocsMixed3.distributionMethod = { 475 DistributionMethod::Cyclic, 476 DistributionMethod::CyclicNumProcsEqNumIters}; 477 cyclicNprocsMixed3.procInfo = getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 478 479 patterns.add<LinalgTilingPattern>( 480 MatmulOp::getOperationName(), context, 481 LinalgTilingOptions() 482 .setTileSizes({8, 8, 4}) 483 .setLoopType(LinalgTilingLoopType::ParallelLoops) 484 .setDistributionOptions(cyclicNprocsMixed3), 485 LinalgTransformationFilter( 486 StringAttr::get(context, "distribute6"), 487 StringAttr::get(context, "after_distribute6"))); 488 } 489 490 { 491 LinalgLoopDistributionOptions cyclicNprocsEqNiters; 492 cyclicNprocsEqNiters.distributionMethod.resize(2, 493 DistributionMethod::Cyclic); 494 cyclicNprocsEqNiters.procInfo = 495 getGpuProcIds<gpu::BlockIdOp, gpu::GridDimOp>; 496 patterns.add<LinalgTilingPattern>( 497 MatmulOp::getOperationName(), context, 498 LinalgTilingOptions() 499 .setTileSizes({8, 8, 4}) 500 .setLoopType(LinalgTilingLoopType::Loops) 501 .setDistributionOptions(cyclicNprocsEqNiters), 502 LinalgTransformationFilter( 503 StringAttr::get(context, "tensors_distribute1"), 504 StringAttr::get(context, "tensors_after_distribute1"))); 505 } 506 } 507 508 static void 509 applyMatmulToVectorPatterns(FuncOp funcOp, 510 bool testMatmulToVectorPatterns1dTiling, 511 bool testMatmulToVectorPatterns2dTiling) { 512 MLIRContext *ctx = funcOp.getContext(); 513 SmallVector<RewritePatternSet, 4> stage1Patterns; 514 if (testMatmulToVectorPatterns1dTiling) { 515 fillL1TilingAndMatmulToVectorPatterns(funcOp, "START", stage1Patterns); 516 } else if (testMatmulToVectorPatterns2dTiling) { 517 stage1Patterns.emplace_back( 518 ctx, std::make_unique<LinalgTilingPattern>( 519 MatmulOp::getOperationName(), ctx, 520 LinalgTilingOptions() 521 .setTileSizes({768, 264, 768}) 522 .setInterchange({1, 2, 0}), 523 LinalgTransformationFilter(StringAttr::get(ctx, "START"), 524 StringAttr::get(ctx, "L2")))); 525 fillL1TilingAndMatmulToVectorPatterns(funcOp, "L2", stage1Patterns); 526 } 527 { 528 // Canonicalization patterns 529 RewritePatternSet canonicalizationPatterns(funcOp.getContext()); 530 vector::populateVectorTransferPermutationMapLoweringPatterns( 531 canonicalizationPatterns); 532 vector::populateVectorReductionToContractPatterns(canonicalizationPatterns); 533 stage1Patterns.push_back(std::move(canonicalizationPatterns)); 534 } 535 SmallVector<FrozenRewritePatternSet, 4> frozenStage1Patterns; 536 llvm::move(stage1Patterns, std::back_inserter(frozenStage1Patterns)); 537 FrozenRewritePatternSet stage2Patterns = 538 getLinalgTilingCanonicalizationPatterns(ctx); 539 (void)applyStagedPatterns(funcOp, frozenStage1Patterns, stage2Patterns); 540 } 541 542 static void applyVectorTransferForwardingPatterns(FuncOp funcOp) { 543 RewritePatternSet forwardPattern(funcOp.getContext()); 544 forwardPattern.add<LinalgCopyVTRForwardingPattern>(funcOp.getContext()); 545 forwardPattern.add<LinalgCopyVTWForwardingPattern>(funcOp.getContext()); 546 (void)applyPatternsAndFoldGreedily(funcOp, std::move(forwardPattern)); 547 } 548 549 static void applyLinalgToVectorPatterns(FuncOp funcOp) { 550 RewritePatternSet patterns(funcOp.getContext()); 551 auto *ctx = funcOp.getContext(); 552 patterns.add<LinalgVectorizationPattern>( 553 ctx, LinalgTransformationFilter() 554 .addOpFilter<ContractionOpInterface, FillOp, GenericOp>()); 555 patterns.add<CopyVectorizationPattern>(ctx); 556 populatePadOpVectorizationPatterns(patterns); 557 populateConvolutionVectorizationPatterns(patterns); 558 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 559 } 560 561 static void applyPadTensorToGenericPatterns(FuncOp funcOp) { 562 RewritePatternSet patterns(funcOp.getContext()); 563 patterns.add<PadOpTransformationPattern>(funcOp.getContext()); 564 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 565 } 566 567 static void applyGeneralizePadTensorPatterns(FuncOp funcOp) { 568 RewritePatternSet patterns(funcOp.getContext()); 569 patterns.add<GeneralizePadOpPattern>(funcOp.getContext()); 570 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 571 } 572 573 static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) { 574 RewritePatternSet patterns(funcOp.getContext()); 575 patterns.add<ExtractSliceOfPadTensorSwapPattern>(funcOp.getContext()); 576 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 577 } 578 579 static void applyTilePattern(FuncOp funcOp, const std::string &loopType, 580 ArrayRef<int64_t> tileSizes, 581 ArrayRef<int64_t> peeledLoops, 582 bool scalarizeDynamicDims) { 583 MLIRContext *context = funcOp.getContext(); 584 RewritePatternSet tilingPattern(context); 585 LinalgTilingLoopType type = 586 llvm::StringSwitch<LinalgTilingLoopType>(loopType) 587 .Case("for", LinalgTilingLoopType::Loops) 588 .Case("affine", LinalgTilingLoopType::AffineLoops) 589 .Case("parallel", LinalgTilingLoopType::ParallelLoops) 590 .Case("tiled_loop", LinalgTilingLoopType::TiledLoops); 591 auto linalgTilingOptions = linalg::LinalgTilingOptions() 592 .setPeeledLoops(peeledLoops) 593 .setLoopType(type); 594 if (scalarizeDynamicDims) { 595 linalgTilingOptions.scalarizeDynamicDims(); 596 assert(tileSizes.empty() && 597 "tileSizes and scalarizeDynamicDims is mutually exclusive"); 598 } else { 599 linalgTilingOptions.setTileSizes(tileSizes); 600 } 601 linalg::LinalgTransformationFilter f(StringAttr::get(context, "tile")); 602 TilingPatterns<linalg::MatmulOp, linalg::GenericOp>::insert( 603 tilingPattern, linalgTilingOptions, f); 604 (void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern)); 605 } 606 607 static constexpr char kPeeledLoopsLabel[] = "__peeled_loops__"; 608 static constexpr char kPartialIterationLabel[] = "__partial_iteration__"; 609 610 namespace { 611 /// Peel TiledLoopOps, i.e., split them into two loops: One loop where the 612 /// `idx`-th loop contains only "full" iterations and a second loop for the 613 /// remaining partial iteration (if any). 614 struct TiledLoopPeelingPattern : public OpRewritePattern<TiledLoopOp> { 615 TiledLoopPeelingPattern(MLIRContext *ctx, int64_t idx, bool skipPartial) 616 : OpRewritePattern<TiledLoopOp>(ctx), idx(idx), skipPartial(skipPartial) { 617 } 618 619 LogicalResult matchAndRewrite(TiledLoopOp loopOp, 620 PatternRewriter &rewriter) const override { 621 SmallVector<int64_t> peeledLoops; 622 if (loopOp->hasAttr(kPeeledLoopsLabel)) { 623 auto attr = loopOp->getAttr(kPeeledLoopsLabel).cast<ArrayAttr>(); 624 peeledLoops = 625 llvm::to_vector<4>(llvm::map_range(attr, [](Attribute attr) { 626 return attr.cast<IntegerAttr>().getInt(); 627 })); 628 // Check if the loop was already peeled. 629 if (llvm::find(peeledLoops, idx) != peeledLoops.end()) 630 return failure(); 631 } 632 if (skipPartial && loopOp->hasAttr(kPartialIterationLabel)) 633 // No peeling of loop nests with a partial iteration. 634 return failure(); 635 636 if (static_cast<int64_t>(loopOp.iterator_types().size()) <= idx) 637 return failure(); 638 639 // Peel loop and canonicalize. 640 TiledLoopOp result; 641 if (failed(linalg::peelAndCanonicalizeTiledLoop(rewriter, loopOp, idx, 642 result))) 643 return failure(); 644 645 // Apply label, so that the same loop is not rewritten a second time. 646 peeledLoops.push_back(idx); 647 rewriter.updateRootInPlace(loopOp, [&]() { 648 loopOp->setAttr(kPeeledLoopsLabel, rewriter.getI64ArrayAttr(peeledLoops)); 649 }); 650 result->setAttr(kPeeledLoopsLabel, rewriter.getI64ArrayAttr(peeledLoops)); 651 result->setAttr(kPartialIterationLabel, rewriter.getUnitAttr()); 652 653 return success(); 654 } 655 656 /// Index of loop to peel. 657 int64_t idx; 658 659 /// If set to true, do not peel TiledLoopOps with a partial iteration. 660 bool skipPartial; 661 }; 662 } // namespace 663 664 static void applyTiledLoopPeelingPattern(FuncOp funcOp, 665 ArrayRef<unsigned> loops, 666 bool skipPartial) { 667 MLIRContext *ctx = funcOp.getContext(); 668 RewritePatternSet patterns(ctx); 669 for (unsigned idx : loops) 670 patterns.add<TiledLoopPeelingPattern>(ctx, idx, skipPartial); 671 (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns)); 672 673 // Drop the markers. 674 funcOp.walk([](TiledLoopOp op) { 675 op->removeAttr(kPeeledLoopsLabel); 676 op->removeAttr(kPartialIterationLabel); 677 }); 678 } 679 680 /// Apply transformations specified as patterns. 681 void TestLinalgTransforms::runOnOperation() { 682 auto lambda = [&](void *) { 683 getOperation().walk([](LinalgOp op) { 684 op->removeAttr(LinalgTransforms::kLinalgTransformMarker); 685 }); 686 }; 687 std::unique_ptr<void, decltype(lambda)> cleanupGuard{(void *)1, lambda}; 688 689 if (testPromotionOptions) { 690 RewritePatternSet patterns(&getContext()); 691 fillPromotionCallBackPatterns(&getContext(), patterns); 692 (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); 693 return; 694 } 695 if (testTileAndDistributionOptions) { 696 RewritePatternSet patterns(&getContext()); 697 fillTileAndDistributePatterns(&getContext(), patterns); 698 (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); 699 return; 700 } 701 if (testPatterns) 702 return applyPatterns(getOperation()); 703 if (testMatmulToVectorPatterns1dTiling || testMatmulToVectorPatterns2dTiling) 704 return applyMatmulToVectorPatterns(getOperation(), 705 testMatmulToVectorPatterns1dTiling, 706 testMatmulToVectorPatterns2dTiling); 707 if (testVectorTransferForwardingPatterns) 708 return applyVectorTransferForwardingPatterns(getOperation()); 709 if (testGenericToVectorPattern) 710 return applyLinalgToVectorPatterns(getOperation()); 711 if (testTransformPadTensor) 712 return applyPadTensorToGenericPatterns(getOperation()); 713 if (testGeneralizePadTensor) 714 return applyGeneralizePadTensorPatterns(getOperation()); 715 if (testSwapSubTensorPadTensor) 716 return applyExtractSliceOfPadTensorSwapPattern(getOperation()); 717 if (testTiledLoopPeeling.hasValue()) 718 return applyTiledLoopPeelingPattern(getOperation(), testTiledLoopPeeling, 719 skipPartial); 720 if (testTilePattern) 721 return applyTilePattern(getOperation(), loopType, tileSizes, peeledLoops, 722 /*scalarizeDynamicDims=*/false); 723 if (testTileScalarizeDynamicDims) 724 return applyTilePattern(getOperation(), loopType, tileSizes, 725 /*peeledLoops=*/{}, /*scalarizeDynamicDims=*/true); 726 } 727 728 namespace mlir { 729 namespace test { 730 void registerTestLinalgTransforms() { 731 PassRegistration<TestLinalgTransforms>(); 732 } 733 } // namespace test 734 } // namespace mlir 735