//===- LoopSpecialization.cpp - scf.parallel/scf.for specialization -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specializes parallel loops and for loops for easier unrolling and
// vectorization.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/DenseMap.h"

using namespace mlir;
using scf::ForOp;
using scf::ParallelOp;

/// Rewrite a parallel loop whose bounds are defined by an affine.min with a
/// constant into two loops, guarded by a check that the bounds are equal to
/// that constant. This is beneficial if the loop will almost always have the
/// constant bound and that version can be fully unrolled and vectorized.
static void specializeParallelLoopForUnrolling(ParallelOp op) {
  SmallVector<int64_t, 2> constantIndices;
  constantIndices.reserve(op.upperBound().size());
  for (auto bound : op.upperBound()) {
    auto minOp = bound.getDefiningOp<AffineMinOp>();
    if (!minOp)
      return;
    int64_t minConstant = std::numeric_limits<int64_t>::max();
    for (AffineExpr expr : minOp.map().getResults()) {
      if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
        minConstant = std::min(minConstant, constantIndex.getValue());
    }
    if (minConstant == std::numeric_limits<int64_t>::max())
      return;
    constantIndices.push_back(minConstant);
  }

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value cond;
  for (auto bound : llvm::zip(op.upperBound(), constantIndices)) {
    Value constant = b.create<ConstantIndexOp>(op.getLoc(), std::get<1>(bound));
    Value cmp = b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq,
                                 std::get<0>(bound), constant);
    cond = cond ? b.create<AndOp>(op.getLoc(), cond, cmp) : cmp;
    map.map(std::get<0>(bound), constant);
  }
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop whose upper bound is defined by an affine.min with a
/// constant into two loops, guarded by a check that the bound is equal to that
/// constant. This is beneficial if the loop will almost always have the
/// constant bound and that version can be fully unrolled and vectorized.
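///
/// For example (a sketch; `%n`, `%lb`, and `%step` are illustrative values,
/// and the scf.parallel case above is analogous):
/// ```
/// %ub = affine.min affine_map<()[s0] -> (1024, s0)>()[%n]
/// scf.for %iv = %lb to %ub step %step { ... }
/// ```
/// is rewritten into:
/// ```
/// %c1024 = constant 1024 : index
/// %cond = cmpi eq, %ub, %c1024 : index
/// scf.if %cond {
///   scf.for %iv = %lb to %c1024 step %step { ... } // constant-bound copy
/// } else {
///   scf.for %iv = %lb to %ub step %step { ... }    // original loop
/// }
/// ```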
static void specializeForLoopForUnrolling(ForOp op) {
  auto bound = op.upperBound();
  auto minOp = bound.getDefiningOp<AffineMinOp>();
  if (!minOp)
    return;
  int64_t minConstant = std::numeric_limits<int64_t>::max();
  for (AffineExpr expr : minOp.map().getResults()) {
    if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
      minConstant = std::min(minConstant, constantIndex.getValue());
  }
  if (minConstant == std::numeric_limits<int64_t>::max())
    return;

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value constant = b.create<ConstantIndexOp>(op.getLoc(), minConstant);
  Value cond =
      b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, bound, constant);
  map.map(bound, constant);
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds/step that potentially do not divide evenly
/// into a for loop where the step divides the iteration space evenly, followed
/// by an scf.if for the last (partial) iteration (if any).
///
/// This function rewrites the given scf.for loop in-place and creates a new
/// scf.if operation for the last iteration. It replaces all uses of the
/// unpeeled loop with the results of the newly generated scf.if.
///
/// The newly generated scf.if operation is returned via `ifOp`. The boundary
/// at which the loop is split (new upper bound) is returned via `splitBound`.
/// The return value indicates whether the loop was rewritten or not.
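///
/// For example (a sketch; `%ub` and `%c4` are illustrative values):
/// ```
/// scf.for %iv = %c0 to %ub step %c4 { ... }
/// ```
/// is rewritten into:
/// ```
/// %split = affine.apply
///     affine_map<()[s0, s1, s2] -> (s1 - (s1 - s0) mod s2)>()[%c0, %ub, %c4]
/// scf.for %iv = %c0 to %split step %c4 { ... }
/// %more = cmpi slt, %split, %ub : index
/// scf.if %more {
///   // Clone of the loop body with %iv replaced by %split (the last,
///   // partial iteration).
/// }
/// ```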
static LogicalResult peelForLoop(RewriterBase &b, ForOp forOp, scf::IfOp &ifOp,
                                 Value &splitBound) {
  RewriterBase::InsertionGuard guard(b);
  auto lbInt = getConstantIntValue(forOp.lowerBound());
  auto ubInt = getConstantIntValue(forOp.upperBound());
  auto stepInt = getConstantIntValue(forOp.step());

  // No specialization necessary if step already divides upper bound evenly.
  if (lbInt && ubInt && stepInt && (*ubInt - *lbInt) % *stepInt == 0)
    return failure();
  // No specialization necessary if step size is 1.
  if (stepInt == static_cast<int64_t>(1))
    return failure();

  auto loc = forOp.getLoc();
  AffineExpr sym0, sym1, sym2;
  bindSymbols(b.getContext(), sym0, sym1, sym2);
  // New upper bound: %ub - (%ub - %lb) mod %step
  auto modMap = AffineMap::get(0, 3, {sym1 - ((sym1 - sym0) % sym2)});
  b.setInsertionPoint(forOp);
  splitBound = b.createOrFold<AffineApplyOp>(
      loc, modMap,
      ValueRange{forOp.lowerBound(), forOp.upperBound(), forOp.step()});

  // Set new upper loop bound.
  Value previousUb = forOp.upperBound();
  b.updateRootInPlace(forOp,
                      [&]() { forOp.upperBoundMutable().assign(splitBound); });
  b.setInsertionPointAfter(forOp);

  // Do we need one more iteration?
  Value hasMoreIter =
      b.create<CmpIOp>(loc, CmpIPredicate::slt, splitBound, previousUb);

  // Create IfOp for last iteration.
  auto resultTypes = forOp.getResultTypes();
  ifOp = b.create<scf::IfOp>(loc, resultTypes, hasMoreIter,
                             /*withElseRegion=*/!resultTypes.empty());
  forOp.replaceAllUsesWith(ifOp->getResults());

  // Build then case.
  BlockAndValueMapping bvm;
  bvm.map(forOp.region().getArgument(0), splitBound);
  for (auto it : llvm::zip(forOp.getRegionIterArgs(), forOp->getResults())) {
    bvm.map(std::get<0>(it), std::get<1>(it));
  }
  b.cloneRegionBefore(forOp.region(), ifOp.thenRegion(),
                      ifOp.thenRegion().begin(), bvm);
  // Build else case.
  if (!resultTypes.empty())
    ifOp.getElseBodyBuilder(b.getListener())
        .create<scf::YieldOp>(loc, forOp->getResults());

  return success();
}

static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
                                 SmallVector<Value> &target) {
  target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
    return val.hasValue() ? *val : Value();
  }));
}

/// Bound an identifier `pos` in a given FlatAffineValueConstraints with
/// constraints drawn from an affine map. Before adding the constraint, the
/// dimensions/symbols of the affine map are aligned with `constraints`.
/// `operands` are the SSA Value operands used with the affine map.
/// Note: This function adds a new symbol column to the `constraints` for each
/// dimension/symbol that exists in the affine map but not in `constraints`.
static LogicalResult alignAndAddBound(FlatAffineValueConstraints &constraints,
                                      FlatAffineConstraints::BoundType type,
                                      unsigned pos, AffineMap map,
                                      ValueRange operands) {
  SmallVector<Value> dims, syms, newSyms;
  unpackOptionalValues(constraints.getMaybeDimValues(), dims);
  unpackOptionalValues(constraints.getMaybeSymbolValues(), syms);

  AffineMap alignedMap =
      alignAffineMapWithValues(map, operands, dims, syms, &newSyms);
  for (unsigned i = syms.size(); i < newSyms.size(); ++i)
    constraints.addSymbolId(constraints.getNumSymbolIds(), newSyms[i]);
  return constraints.addBound(type, pos, alignedMap);
}

/// This function tries to canonicalize an affine.min operation by proving that
/// its value is bounded by the same lower and upper bound. In that case, the
/// operation can be folded away.
///
/// Bounds are computed by FlatAffineValueConstraints. Invariants required for
/// finding/proving bounds should be supplied via `constraints`.
///
/// 1. Add dimensions for `minOp` and `minOpUb` (upper bound of `minOp`).
/// 2. Compute an upper bound of `minOp` and bind it to `minOpUb`. SSA values
///    that are used in `minOp` but are not part of `dims`, are added as extra
///    symbols to the constraint set.
/// 3. For each result of `minOp`: Add result as a dimension `r_i`. Prove that
///    r_i >= minOpUb. If this is the case, ub(minOp) == lb(minOp) and `minOp`
///    can be replaced with that bound.
///
/// In summary, the following constraints are added throughout this function.
/// Note: `invar` are dimensions added by the caller to express the invariants.
///
/// invar | minOp | minOpUb | r_i | extra syms... | const |     eq/ineq
/// ------+-------+---------+-----+---------------+-------+-------------------
/// (various eq./ineq. constraining `invar`, added by the caller)
///  ...  |   0   |    0    |  0  |       0       |  ...  |     ...
/// ------+-------+---------+-----+---------------+-------+-------------------
/// (various ineq. constraining `minOp` in terms of `minOp` operands (`invar`
///  and extra `minOp` operands "extra syms" that are not in `invar`)).
///  ...  |  -1   |    0    |  0  |      ...      |  ...  |     >= 0
/// ------+-------+---------+-----+---------------+-------+-------------------
/// (set `minOpUb` to `minOp` upper bound in terms of `invar` and extra syms)
///  ...  |   0   |   -1    |  0  |      ...      |  ...  |     = 0
/// ------+-------+---------+-----+---------------+-------+-------------------
/// (for each `minOp` map result r_i: copy previous constraints, set r_i to
///  corresponding map result, prove r_i >= minOpUb via contradiction)
///  ...  |   0   |    0    | -1  |      ...      |  ...  |     = 0
///   0   |   0   |    1    | -1  |       0       |  -1   |     >= 0
///
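/// For example (a sketch): let `minOp` be `min(s0, s1 - d0)` and let
/// `constraints` already contain the invariant `s1 - d0 >= s0`. Step 2
/// computes the upper bound `minOpUb = s0` (from the first map result), and
/// step 3 proves `r_0 = s0 >= minOpUb` trivially and `r_1 = s1 - d0 >=
/// minOpUb` from the invariant, so `minOp` folds to `s0`.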
static LogicalResult
canonicalizeAffineMinOp(RewriterBase &rewriter, AffineMinOp minOp,
                        FlatAffineValueConstraints constraints) {
  RewriterBase::InsertionGuard guard(rewriter);
  AffineMap minOpMap = minOp.getAffineMap();
  unsigned numResults = minOpMap.getNumResults();

  // Add a few extra dimensions.
  unsigned dimMinOp = constraints.addDimId();   // `minOp`
  unsigned dimMinOpUb = constraints.addDimId(); // `minOp` upper bound
  unsigned resultDimStart = constraints.getNumDimIds();
  for (unsigned i = 0; i < numResults; ++i)
    constraints.addDimId();

  // Add an inequality for each result expr_i of minOpMap: minOp <= expr_i
  if (failed(alignAndAddBound(constraints, FlatAffineConstraints::UB, dimMinOp,
                              minOpMap, minOp.operands())))
    return failure();

  // Try to compute an upper bound for minOp, expressed in terms of the other
  // `dims` and extra symbols.
  SmallVector<AffineMap> minOpValLb(1), minOpValUb(1);
  constraints.getSliceBounds(dimMinOp, 1, minOp.getContext(), &minOpValLb,
                             &minOpValUb);
  // TODO: `getSliceBounds` may return multiple bounds at the moment. This is
  // a TODO of `getSliceBounds` and not handled here.
  if (!minOpValUb[0] || minOpValUb[0].getNumResults() != 1)
    return failure(); // No or multiple upper bounds found.

  // Add an equality: dimMinOpUb = minOpValUb[0]
  // Add back dimension for minOp. (Was removed by `getSliceBounds`.)
  AffineMap alignedUbMap = minOpValUb[0].shiftDims(/*shift=*/1,
                                                   /*offset=*/dimMinOp);
  if (failed(constraints.addBound(FlatAffineConstraints::EQ, dimMinOpUb,
                                  alignedUbMap)))
    return failure();

  // If the constraint system is empty, there is an inconsistency. (E.g., this
  // can happen if loop lb > ub.)
  if (constraints.isEmpty())
    return failure();

  // Prove that each result of minOpMap has a lower bound that is equal to (or
  // greater than) the upper bound of minOp (`dimMinOpUb`). In that case,
  // minOp can be replaced with the bound. I.e., prove that for each result
  // expr_i (represented by dimension r_i):
  //
  // r_i >= minOpUb
  //
  // To prove this inequality, add its negation to the constraint set and prove
  // that the constraint set is empty.
  for (unsigned i = resultDimStart; i < resultDimStart + numResults; ++i) {
    FlatAffineValueConstraints newConstr(constraints);

    // Add an equality: r_i = expr_i
    // Note: These equalities could have been added earlier and used to express
    // minOp <= expr_i. However, then we run the risk that `getSliceBounds`
    // computes minOpUb in terms of r_i dims, which is not desired.
    if (failed(alignAndAddBound(newConstr, FlatAffineConstraints::EQ, i,
                                minOpMap.getSubMap({i - resultDimStart}),
                                minOp.operands())))
      return failure();

    // Add inequality: r_i < minOpUb (equiv.: minOpUb - r_i - 1 >= 0)
    SmallVector<int64_t> ineq(newConstr.getNumCols(), 0);
    ineq[dimMinOpUb] = 1;
    ineq[i] = -1;
    ineq[newConstr.getNumCols() - 1] = -1;
    newConstr.addInequality(ineq);
    if (!newConstr.isEmpty())
      return failure();
  }

  // Lower and upper bound of `minOp` are equal. Replace `minOp` with its
  // bound.
  AffineMap newMap = alignedUbMap;
  SmallVector<Value> newOperands;
  unpackOptionalValues(constraints.getMaybeDimAndSymbolValues(), newOperands);
  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
  rewriter.setInsertionPoint(minOp);
  rewriter.replaceOpWithNewOp<AffineApplyOp>(minOp, newMap, newOperands);
  return success();
}

/// Try to simplify an affine.min operation `minOp` after loop peeling. This
/// function detects affine.min operations such as (ub is the previous upper
/// bound of the unpeeled loop):
/// ```
/// #map = affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>
/// %r = affine.min #map(%iv)[%step, %ub]
/// ```
/// and rewrites them into (in the case of the peeled loop):
/// ```
/// %r = %step
/// ```
/// affine.min operations inside the generated scf.if operation are rewritten
/// in a similar way.
///
/// This function builds up a set of constraints, capable of proving that:
/// * Inside the peeled loop: min(step, ub - iv) == step
/// * Inside the scf.if operation: min(step, ub - iv) == ub - iv
///
/// Note: `ub` is the previous upper bound of the loop (before peeling).
/// `insideLoop` must be true for affine.min ops inside the loop and false for
/// affine.min ops inside the scf.if op.
static LogicalResult rewritePeeledAffineOp(RewriterBase &rewriter,
                                           AffineMinOp minOp, Value iv,
                                           Value ub, Value step,
                                           bool insideLoop) {
  FlatAffineValueConstraints constraints;
  constraints.addDimId(0, iv);
  constraints.addDimId(1, ub);
  constraints.addDimId(2, step);
  if (auto constUb = getConstantIntValue(ub))
    constraints.addBound(FlatAffineConstraints::EQ, 1, *constUb);
  if (auto constStep = getConstantIntValue(step))
    constraints.addBound(FlatAffineConstraints::EQ, 2, *constStep);

  // Add the loop peeling invariant. This is the main piece of knowledge that
  // enables AffineMinOp simplification.
  if (insideLoop) {
    // ub - iv >= step (equiv.: -iv + ub - step + 0 >= 0)
    // Intuitively: Inside the peeled loop, every iteration is a "full"
    // iteration, i.e., step divides the iteration space `ub - lb` evenly.
    constraints.addInequality({-1, 1, -1, 0});
  } else {
    // ub - iv < step (equiv.: iv - ub + step - 1 >= 0)
    // Intuitively: `iv` is the split bound here, i.e., the iteration variable
    // value of the very last iteration (in the unpeeled loop). At that point,
    // there are fewer than `step` elements remaining. (Otherwise, the peeled
    // loop would run for at least one more iteration.)
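    // For example (hypothetical values): with lb = 0, ub = 10, and step = 4,
    // the peeled loop covers [0, 8) and the scf.if handles iv = 8, where
    // ub - iv = 2 < step.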
    constraints.addInequality({1, -1, 1, -1});
  }

  return canonicalizeAffineMinOp(rewriter, minOp, constraints);
}

LogicalResult mlir::scf::peelAndCanonicalizeForLoop(RewriterBase &rewriter,
                                                    ForOp forOp,
                                                    scf::IfOp &ifOp) {
  Value ub = forOp.upperBound();
  Value splitBound;
  if (failed(peelForLoop(rewriter, forOp, ifOp, splitBound)))
    return failure();

  // Rewrite affine.min ops.
  forOp.walk([&](AffineMinOp minOp) {
    (void)rewritePeeledAffineOp(rewriter, minOp, forOp.getInductionVar(), ub,
                                forOp.step(), /*insideLoop=*/true);
  });
  ifOp.walk([&](AffineMinOp minOp) {
    (void)rewritePeeledAffineOp(rewriter, minOp, splitBound, ub, forOp.step(),
                                /*insideLoop=*/false);
  });

  return success();
}

/// Canonicalize AffineMinOp operations in the context of for loops with a
/// known range. Call `canonicalizeAffineMinOp` and add the following
/// constraints to the constraint system (along with the missing dimensions):
///
/// * iv >= lb
/// * iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
///
/// Note: Due to limitations of FlatAffineConstraints, only constant step sizes
/// are currently supported.
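///
/// For example (a sketch with illustrative constants):
/// ```
/// scf.for %iv = %c0 to %c128 step %c16 {
///   %r = affine.min affine_map<(d0) -> (16, 128 - d0)>(%iv)
/// }
/// ```
/// Here `%iv` ranges over {0, 16, ..., 112}, so `128 - %iv >= 16` always
/// holds and `%r` can be folded to the constant 16.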
LogicalResult mlir::scf::canonicalizeAffineMinOpInLoop(
    AffineMinOp minOp, RewriterBase &rewriter,
    function_ref<LogicalResult(Value, Value &, Value &, Value &)> loopMatcher) {
  FlatAffineValueConstraints constraints;
  DenseSet<Value> allIvs;

  // Find all iteration variables among `minOp`'s operands and constrain them.
  for (Value operand : minOp.operands()) {
    // Skip duplicate ivs.
    if (llvm::find(allIvs, operand) != allIvs.end())
      continue;

    // If `operand` is an iteration variable: Find corresponding loop
    // bounds and step.
    Value iv = operand;
    Value lb, ub, step;
    if (failed(loopMatcher(operand, lb, ub, step)))
      continue;
    allIvs.insert(iv);

    // FlatAffineConstraints does not support semi-affine expressions.
    // Therefore, only constant step values are supported.
    auto stepInt = getConstantIntValue(step);
    if (!stepInt)
      continue;

    unsigned dimIv = constraints.addDimId(iv);
    unsigned dimLb = constraints.addDimId(lb);
    unsigned dimUb = constraints.addDimId(ub);

    // If loop lower/upper bounds are constant: Add EQ constraint.
    Optional<int64_t> lbInt = getConstantIntValue(lb);
    Optional<int64_t> ubInt = getConstantIntValue(ub);
    if (lbInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimLb, *lbInt);
    if (ubInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimUb, *ubInt);

    // iv >= lb (equiv.: iv - lb >= 0)
    SmallVector<int64_t> ineqLb(constraints.getNumCols(), 0);
    ineqLb[dimIv] = 1;
    ineqLb[dimLb] = -1;
    constraints.addInequality(ineqLb);

    // iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
    AffineExpr exprLb = lbInt ? rewriter.getAffineConstantExpr(*lbInt)
                              : rewriter.getAffineDimExpr(dimLb);
    AffineExpr exprUb = ubInt ? rewriter.getAffineConstantExpr(*ubInt)
                              : rewriter.getAffineDimExpr(dimUb);
    AffineExpr ivUb =
        exprLb + 1 + (*stepInt * ((exprUb - exprLb - 1).floorDiv(*stepInt)));
    auto map = AffineMap::get(
        /*dimCount=*/constraints.getNumDimIds(),
        /*symbolCount=*/constraints.getNumSymbolIds(), /*result=*/ivUb);

    if (failed(constraints.addBound(FlatAffineConstraints::UB, dimIv, map)))
      return failure();
  }

  return canonicalizeAffineMinOp(rewriter, minOp, constraints);
}

static constexpr char kPeeledLoopLabel[] = "__peeled_loop__";
static constexpr char kPartialIterationLabel[] = "__partial_iteration__";

namespace {
struct ForLoopPeelingPattern : public OpRewritePattern<ForOp> {
  ForLoopPeelingPattern(MLIRContext *ctx, bool skipPartial)
      : OpRewritePattern<ForOp>(ctx), skipPartial(skipPartial) {}

  LogicalResult matchAndRewrite(ForOp forOp,
                                PatternRewriter &rewriter) const override {
    // Do not peel already peeled loops.
    if (forOp->hasAttr(kPeeledLoopLabel))
      return failure();
    if (skipPartial) {
      // No peeling of loops inside the partial iteration (scf.if) of another
      // peeled loop.
      Operation *op = forOp.getOperation();
      while ((op = op->getParentOfType<scf::IfOp>())) {
        if (op->hasAttr(kPartialIterationLabel))
          return failure();
      }
    }
    // Apply loop peeling.
    scf::IfOp ifOp;
    if (failed(peelAndCanonicalizeForLoop(rewriter, forOp, ifOp)))
      return failure();
    // Apply label, so that the same loop is not rewritten a second time.
    rewriter.updateRootInPlace(forOp, [&]() {
      forOp->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    });
    ifOp->setAttr(kPartialIterationLabel, rewriter.getUnitAttr());
    return success();
  }

  /// If set to true, loops inside partial iterations of another peeled loop
  /// are not peeled. This reduces the size of the generated code. Partial
  /// iterations are not usually performance critical.
  /// Note: Takes into account the entire chain of parent operations, not just
  /// the direct parent.
  bool skipPartial;
};

/// Canonicalize AffineMinOp operations in the context of scf.for and
/// scf.parallel loops with a known range.
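///
/// For example (a sketch), with `%iv` being an induction variable of
/// `scf.parallel (%iv) = (%c0) to (%c100) step (%c10)`:
/// ```
/// %r = affine.min affine_map<(d0) -> (10, 100 - d0)>(%iv)
/// ```
/// canonicalizes to the constant 10, because `100 - %iv >= 10` for all values
/// that `%iv` can take.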
struct AffineMinSCFCanonicalizationPattern
    : public OpRewritePattern<AffineMinOp> {
  using OpRewritePattern<AffineMinOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AffineMinOp minOp,
                                PatternRewriter &rewriter) const override {
    auto loopMatcher = [](Value iv, Value &lb, Value &ub, Value &step) {
      if (scf::ForOp forOp = scf::getForInductionVarOwner(iv)) {
        lb = forOp.lowerBound();
        ub = forOp.upperBound();
        step = forOp.step();
        return success();
      }
      if (scf::ParallelOp parOp = scf::getParallelForInductionVarOwner(iv)) {
        for (unsigned idx = 0; idx < parOp.getNumLoops(); ++idx) {
          if (parOp.getInductionVars()[idx] == iv) {
            lb = parOp.lowerBound()[idx];
            ub = parOp.upperBound()[idx];
            step = parOp.step()[idx];
            return success();
          }
        }
        return failure();
      }
      return failure();
    };

    return scf::canonicalizeAffineMinOpInLoop(minOp, rewriter, loopMatcher);
  }
};
} // namespace

namespace {
struct ParallelLoopSpecialization
    : public SCFParallelLoopSpecializationBase<ParallelLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk(
        [](ParallelOp op) { specializeParallelLoopForUnrolling(op); });
  }
};

struct ForLoopSpecialization
    : public SCFForLoopSpecializationBase<ForLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk([](ForOp op) { specializeForLoopForUnrolling(op); });
  }
};

struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

    // Drop the markers.
    funcOp.walk([](Operation *op) {
      op->removeAttr(kPeeledLoopLabel);
      op->removeAttr(kPartialIterationLabel);
    });
  }
};

struct AffineMinSCFCanonicalization
    : public AffineMinSCFCanonicalizationBase<AffineMinSCFCanonicalization> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    patterns.add<AffineMinSCFCanonicalizationPattern>(ctx);
    if (failed(applyPatternsAndFoldGreedily(funcOp, std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createAffineMinSCFCanonicalizationPass() {
  return std::make_unique<AffineMinSCFCanonicalization>();
}

std::unique_ptr<Pass> mlir::createParallelLoopSpecializationPass() {
  return std::make_unique<ParallelLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopSpecializationPass() {
  return std::make_unique<ForLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopPeelingPass() {
  return std::make_unique<ForLoopPeeling>();
}

void mlir::scf::populateSCFLoopBodyCanonicalizationPatterns(
    RewritePatternSet &patterns) {
  patterns.insert<AffineMinSCFCanonicalizationPattern>(patterns.getContext());
}