//===- LoopSpecialization.cpp - scf.parallel/scf.for specialization -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specializes parallel loops and for loops for easier unrolling and
// vectorization.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/DenseMap.h"

using namespace mlir;
using scf::ForOp;
using scf::ParallelOp;

/// Rewrite a parallel loop with bounds defined by an affine.min with a constant
/// into 2 loops after checking if the bounds are equal to that constant. This
/// is beneficial if the loop will almost always have the constant bound and
/// that version can be fully unrolled and vectorized.
static void specializeParallelLoopForUnrolling(ParallelOp op) {
  SmallVector<int64_t, 2> constantIndices;
  constantIndices.reserve(op.upperBound().size());
  for (auto bound : op.upperBound()) {
    auto minOp = bound.getDefiningOp<AffineMinOp>();
    if (!minOp)
      return;
    int64_t minConstant = std::numeric_limits<int64_t>::max();
    for (AffineExpr expr : minOp.map().getResults()) {
      if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
        minConstant = std::min(minConstant, constantIndex.getValue());
    }
    if (minConstant == std::numeric_limits<int64_t>::max())
      return;
    constantIndices.push_back(minConstant);
  }

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value cond;
  for (auto bound : llvm::zip(op.upperBound(), constantIndices)) {
    Value constant = b.create<ConstantIndexOp>(op.getLoc(), std::get<1>(bound));
    Value cmp = b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq,
                                 std::get<0>(bound), constant);
    cond = cond ? b.create<AndOp>(op.getLoc(), cond, cmp) : cmp;
    map.map(std::get<0>(bound), constant);
  }
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds defined by an affine.min with a constant into
/// 2 loops after checking if the bounds are equal to that constant. This is
/// beneficial if the loop will almost always have the constant bound and that
/// version can be fully unrolled and vectorized.
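/// For example (a sketch only; SSA names and exact op syntax are
/// illustrative):
/// ```
/// %bound = affine.min affine_map<(d0)[s0] -> (64, -d0 + s0)>(%iv)[%n]
/// scf.for %i = %c0 to %bound step %c1 { ... }
/// ```
/// is rewritten into:
/// ```
/// %c64 = constant 64 : index
/// %eq = cmpi eq, %bound, %c64 : index
/// scf.if %eq {
///   scf.for %i = %c0 to %c64 step %c1 { ... }   // constant trip count
/// } else {
///   scf.for %i = %c0 to %bound step %c1 { ... } // general fallback
/// }
/// ```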
static void specializeForLoopForUnrolling(ForOp op) {
  auto bound = op.upperBound();
  auto minOp = bound.getDefiningOp<AffineMinOp>();
  if (!minOp)
    return;
  int64_t minConstant = std::numeric_limits<int64_t>::max();
  for (AffineExpr expr : minOp.map().getResults()) {
    if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
      minConstant = std::min(minConstant, constantIndex.getValue());
  }
  if (minConstant == std::numeric_limits<int64_t>::max())
    return;

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value constant = b.create<ConstantIndexOp>(op.getLoc(), minConstant);
  Value cond =
      b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, bound, constant);
  map.map(bound, constant);
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds/step that potentially do not divide evenly
/// into a for loop where the step divides the iteration space evenly, followed
/// by another scf.for for the last (partial) iteration (if any).
///
/// This function rewrites the given scf.for loop in-place and creates a new
/// scf.for operation for the last iteration. It replaces all uses of the
/// unpeeled loop with the results of the newly generated loop.
///
/// The newly generated loop is returned via `partialIteration`. The boundary
/// at which the loop is split (the new upper bound of the main loop) is
/// returned via `splitBound`. The return value indicates whether the loop was
/// rewritten or not.
static LogicalResult peelForLoop(RewriterBase &b, ForOp forOp,
                                 ForOp &partialIteration, Value &splitBound) {
  RewriterBase::InsertionGuard guard(b);
  auto lbInt = getConstantIntValue(forOp.lowerBound());
  auto ubInt = getConstantIntValue(forOp.upperBound());
  auto stepInt = getConstantIntValue(forOp.step());

  // No specialization necessary if step already divides upper bound evenly.
  if (lbInt && ubInt && stepInt && (*ubInt - *lbInt) % *stepInt == 0)
    return failure();
  // No specialization necessary if step size is 1.
  if (stepInt == static_cast<int64_t>(1))
    return failure();

  auto loc = forOp.getLoc();
  AffineExpr sym0, sym1, sym2;
  bindSymbols(b.getContext(), sym0, sym1, sym2);
  // New upper bound: %ub - (%ub - %lb) mod %step
  auto modMap = AffineMap::get(0, 3, {sym1 - ((sym1 - sym0) % sym2)});
  b.setInsertionPoint(forOp);
  splitBound = b.createOrFold<AffineApplyOp>(
      loc, modMap,
      ValueRange{forOp.lowerBound(), forOp.upperBound(), forOp.step()});

  // Create ForOp for partial iteration.
  b.setInsertionPointAfter(forOp);
  partialIteration = cast<ForOp>(b.clone(*forOp.getOperation()));
  partialIteration.lowerBoundMutable().assign(splitBound);
  forOp.replaceAllUsesWith(partialIteration->getResults());
  partialIteration.initArgsMutable().assign(forOp->getResults());

  // Set new upper loop bound.
  b.updateRootInPlace(forOp,
                      [&]() { forOp.upperBoundMutable().assign(splitBound); });

  return success();
}
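
// As an example, `peelForLoop` performs the following transformation (a
// sketch; SSA names are illustrative):
//
//   scf.for %iv = %lb to %ub step %c4 { ... }
//
// becomes:
//
//   %split = affine.apply
//       affine_map<()[s0, s1, s2] -> (s1 - (s1 - s0) mod s2)>()[%lb, %ub, %c4]
//   scf.for %iv = %lb to %split step %c4 { ... } // only full iterations
//   scf.for %iv = %split to %ub step %c4 { ... } // partial iteration, if any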
static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
                                 SmallVector<Value> &target) {
  target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
    return val.hasValue() ? *val : Value();
  }));
}

/// Bound an identifier `pos` in a given FlatAffineValueConstraints with
/// constraints drawn from an affine map. Before adding the constraint, the
/// dimensions/symbols of the affine map are aligned with `constraints`.
/// `operands` are the SSA Value operands used with the affine map.
/// Note: This function adds a new symbol column to the `constraints` for each
/// dimension/symbol that exists in the affine map but not in `constraints`.
static LogicalResult alignAndAddBound(FlatAffineValueConstraints &constraints,
                                      FlatAffineConstraints::BoundType type,
                                      unsigned pos, AffineMap map,
                                      ValueRange operands) {
  SmallVector<Value> dims, syms, newSyms;
  unpackOptionalValues(constraints.getMaybeDimValues(), dims);
  unpackOptionalValues(constraints.getMaybeSymbolValues(), syms);

  AffineMap alignedMap =
      alignAffineMapWithValues(map, operands, dims, syms, &newSyms);
  for (unsigned i = syms.size(); i < newSyms.size(); ++i)
    constraints.appendSymbolId(newSyms[i]);
  return constraints.addBound(type, pos, alignedMap);
}
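
// Illustrative example of the alignment performed above (a sketch under
// assumed operands): if `constraints` tracks the dims (%iv, %ub) and no
// symbols, and `map` is affine_map<(d0)[s0] -> (-d0 + s0)> with operands
// (%iv, %m), the aligned map is affine_map<(d0, d1)[s0] -> (-d0 + s0)>
// applied to (%iv, %ub)[%m]: d0/d1 match the existing dims, and the operand
// %m, unknown to `constraints`, is appended as the new symbol s0.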
/// This function tries to canonicalize min/max operations by proving that
/// their value is bounded by the same lower and upper bound. In that case, the
/// operation can be folded away.
///
/// Bounds are computed by FlatAffineValueConstraints. Invariants required for
/// finding/proving bounds should be supplied via `constraints`.
///
/// 1. Add dimensions for `op` and `opBound` (lower or upper bound of `op`).
/// 2. Compute an upper bound of `op` (in case of `isMin`) or a lower bound (in
///    case of `!isMin`) and bind it to `opBound`. SSA values that are used in
///    `op` but are not part of `constraints`, are added as extra symbols.
/// 3. For each result of `op`: Add result as a dimension `r_i`. Prove that:
///    * If `isMin`: r_i >= opBound
///    * If `!isMin`: r_i <= opBound
///    If this is the case, ub(op) == lb(op).
/// 4. Replace `op` with `opBound`.
///
/// In summary, the following constraints are added throughout this function.
/// Note: `invar` are dimensions added by the caller to express the invariants.
/// (Showing only the case where `isMin`.)
///
///  invar |  op | opBound | r_i | extra syms... | const |  eq/ineq
///  ------+-----+---------+-----+---------------+-------+-------------------
///   (various eq./ineq. constraining `invar`, added by the caller)
///    ... |   0 |    0    |  0  |       0       |  ...  |    ...
///  ------+-----+---------+-----+---------------+-------+-------------------
///   (various ineq. constraining `op` in terms of `op` operands (`invar` and
///    extra `op` operands "extra syms" that are not in `invar`)).
///    ... |  -1 |    0    |  0  |      ...      |  ...  |    >= 0
///  ------+-----+---------+-----+---------------+-------+-------------------
///   (set `opBound` to `op` upper bound in terms of `invar` and "extra syms")
///    ... |   0 |   -1    |  0  |      ...      |  ...  |    = 0
///  ------+-----+---------+-----+---------------+-------+-------------------
///   (for each `op` map result r_i: set r_i to corresponding map result,
///    prove that r_i >= minOpUb via contradiction)
///    ... |   0 |    0    | -1  |      ...      |  ...  |    = 0
///      0 |   0 |    1    | -1  |       0       |  -1   |    >= 0
///
static LogicalResult
canonicalizeMinMaxOp(RewriterBase &rewriter, Operation *op, AffineMap map,
                     ValueRange operands, bool isMin,
                     FlatAffineValueConstraints constraints) {
  RewriterBase::InsertionGuard guard(rewriter);
  unsigned numResults = map.getNumResults();

  // Add a few extra dimensions.
  unsigned dimOp = constraints.appendDimId();      // `op`
  unsigned dimOpBound = constraints.appendDimId(); // `op` lower/upper bound
  unsigned resultDimStart = constraints.appendDimId(/*num=*/numResults);

  // Add an inequality for each result expr_i of map:
  // isMin: op <= expr_i, !isMin: op >= expr_i
  auto boundType =
      isMin ? FlatAffineConstraints::UB : FlatAffineConstraints::LB;
  if (failed(alignAndAddBound(constraints, boundType, dimOp, map, operands)))
    return failure();

  // Try to compute a lower/upper bound for op, expressed in terms of the other
  // `dims` and extra symbols.
  SmallVector<AffineMap> opLb(1), opUb(1);
  constraints.getSliceBounds(dimOp, 1, rewriter.getContext(), &opLb, &opUb);
  AffineMap boundMap = isMin ? opUb[0] : opLb[0];
  // TODO: `getSliceBounds` may return multiple bounds at the moment. This is
  // a TODO of `getSliceBounds` and not handled here.
  if (!boundMap || boundMap.getNumResults() != 1)
    return failure(); // No or multiple bounds found.

  // Add an equality: Set dimOpBound to computed bound.
  // Add back dimension for op. (Was removed by `getSliceBounds`.)
  AffineMap alignedBoundMap = boundMap.shiftDims(/*shift=*/1, /*offset=*/dimOp);
  if (failed(constraints.addBound(FlatAffineConstraints::EQ, dimOpBound,
                                  alignedBoundMap)))
    return failure();

  // If the constraint system is empty, there is an inconsistency. (E.g., this
  // can happen if loop lb > ub.)
  if (constraints.isEmpty())
    return failure();

  // In the case of `isMin` (`!isMin` is inversed):
  // Prove that each result of `map` has a lower bound that is equal to (or
  // greater than) the upper bound of `op` (`dimOpBound`). In that case, `op`
  // can be replaced with the bound. I.e., prove that for each result
  // expr_i (represented by dimension r_i):
  //
  // r_i >= opBound
  //
  // To prove this inequality, add its negation to the constraint set and prove
  // that the constraint set is empty.
  for (unsigned i = resultDimStart; i < resultDimStart + numResults; ++i) {
    FlatAffineValueConstraints newConstr(constraints);

    // Add an equality: r_i = expr_i
    // Note: These equalities could have been added earlier and used to express
    // minOp <= expr_i. However, then we run the risk that `getSliceBounds`
    // computes minOpUb in terms of r_i dims, which is not desired.
    if (failed(alignAndAddBound(newConstr, FlatAffineConstraints::EQ, i,
                                map.getSubMap({i - resultDimStart}), operands)))
      return failure();

    // If `isMin`: Add inequality: r_i < opBound
    //             equiv.: opBound - r_i - 1 >= 0
    // If `!isMin`: Add inequality: r_i > opBound
    //              equiv.: -opBound + r_i - 1 >= 0
    SmallVector<int64_t> ineq(newConstr.getNumCols(), 0);
    ineq[dimOpBound] = isMin ? 1 : -1;
    ineq[i] = isMin ? -1 : 1;
    ineq[newConstr.getNumCols() - 1] = -1;
    newConstr.addInequality(ineq);
    if (!newConstr.isEmpty())
      return failure();
  }

  // Lower and upper bound of `op` are equal. Replace `minOp` with its bound.
  AffineMap newMap = alignedBoundMap;
  SmallVector<Value> newOperands;
  unpackOptionalValues(constraints.getMaybeDimAndSymbolValues(), newOperands);
  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
  rewriter.setInsertionPoint(op);
  rewriter.replaceOpWithNewOp<AffineApplyOp>(op, newMap, newOperands);
  return success();
}
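
// Worked example (a sketch, using the loop peeling invariant established in
// `rewritePeeledMinMaxOp` below): given ub - iv >= step and
// op = min(step, ub - iv), step 2 computes the upper bound opBound = step.
// Step 3 then adds the negation of r_i >= opBound for each result:
// r_0 = step < step is immediately empty, and r_1 = ub - iv < step
// contradicts the invariant. Both systems are empty, so `op` folds to
// opBound = step.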
/// Try to simplify a min/max operation `op` after loop peeling. This function
/// can simplify min/max operations such as (ub is the previous upper bound of
/// the unpeeled loop):
/// ```
/// #map = affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>
/// %r = affine.min #map(%iv)[%step, %ub]
/// ```
/// and rewrites them into (in the case of the peeled loop):
/// ```
/// %r = %step
/// ```
/// min/max operations inside the partial iteration are rewritten in a similar
/// way.
///
/// This function builds up a set of constraints, capable of proving that:
/// * Inside the peeled loop: min(step, ub - iv) == step
/// * Inside the partial iteration: min(step, ub - iv) == ub - iv
///
/// Returns `success` if the given operation was replaced by a new operation;
/// `failure` otherwise.
///
/// Note: `ub` is the previous upper bound of the loop (before peeling).
/// `insideLoop` must be true for min/max ops inside the loop and false for
/// affine.min ops inside the partial iteration. For an explanation of the
/// other parameters, see comment of `canonicalizeMinMaxOpInLoop`.
LogicalResult mlir::scf::rewritePeeledMinMaxOp(RewriterBase &rewriter,
                                               Operation *op, AffineMap map,
                                               ValueRange operands, bool isMin,
                                               Value iv, Value ub, Value step,
                                               bool insideLoop) {
  FlatAffineValueConstraints constraints;
  constraints.appendDimId({iv, ub, step});
  if (auto constUb = getConstantIntValue(ub))
    constraints.addBound(FlatAffineConstraints::EQ, 1, *constUb);
  if (auto constStep = getConstantIntValue(step))
    constraints.addBound(FlatAffineConstraints::EQ, 2, *constStep);

  // Add loop peeling invariant. This is the main piece of knowledge that
  // enables AffineMinOp simplification. (Constraint columns: iv, ub, step,
  // constant.)
  if (insideLoop) {
    // ub - iv >= step (equiv.: -iv + ub - step + 0 >= 0)
    // Intuitively: Inside the peeled loop, every iteration is a "full"
    // iteration, i.e., step divides the iteration space `ub - lb` evenly.
    constraints.addInequality({-1, 1, -1, 0});
  } else {
    // ub - iv < step (equiv.: iv + -ub + step - 1 >= 0)
    // Intuitively: `iv` is the split bound here, i.e., the iteration variable
    // value of the very last iteration (in the unpeeled loop). At that point,
    // there are less than `step` elements remaining. (Otherwise, the peeled
    // loop would run for at least one more iteration.)
    constraints.addInequality({1, -1, 1, -1});
  }

  return canonicalizeMinMaxOp(rewriter, op, map, operands, isMin, constraints);
}
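
// End-to-end effect of peeling followed by min simplification (a sketch; SSA
// names are illustrative):
//
//   #map = affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>
//   scf.for %iv = %c0 to %ub step %c4 {
//     %m = affine.min #map(%iv)[%c4, %ub] // e.g., a tile size
//     ...
//   }
//
// becomes:
//
//   scf.for %iv = %c0 to %split step %c4 {
//     ... // %m simplified to %c4 (the step)
//   }
//   scf.for %iv = %split to %ub step %c4 {
//     ... // %m rewritten to an affine.apply computing %ub - %iv
//   }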
template <typename OpTy, bool IsMin>
static void rewriteAffineOpAfterPeeling(RewriterBase &rewriter, ForOp forOp,
                                        ForOp partialIteration,
                                        Value previousUb) {
  Value mainIv = forOp.getInductionVar();
  Value partialIv = partialIteration.getInductionVar();
  assert(forOp.step() == partialIteration.step() &&
         "expected same step in main and partial loop");
  Value step = forOp.step();

  forOp.walk([&](OpTy affineOp) {
    AffineMap map = affineOp.getAffineMap();
    (void)scf::rewritePeeledMinMaxOp(rewriter, affineOp, map,
                                     affineOp.operands(), IsMin, mainIv,
                                     previousUb, step,
                                     /*insideLoop=*/true);
  });
  partialIteration.walk([&](OpTy affineOp) {
    AffineMap map = affineOp.getAffineMap();
    (void)scf::rewritePeeledMinMaxOp(rewriter, affineOp, map,
                                     affineOp.operands(), IsMin, partialIv,
                                     previousUb, step, /*insideLoop=*/false);
  });
}

LogicalResult mlir::scf::peelAndCanonicalizeForLoop(RewriterBase &rewriter,
                                                    ForOp forOp,
                                                    ForOp &partialIteration) {
  Value previousUb = forOp.upperBound();
  Value splitBound;
  if (failed(peelForLoop(rewriter, forOp, partialIteration, splitBound)))
    return failure();

  // Rewrite affine.min and affine.max ops.
  rewriteAffineOpAfterPeeling<AffineMinOp, /*IsMin=*/true>(
      rewriter, forOp, partialIteration, previousUb);
  rewriteAffineOpAfterPeeling<AffineMaxOp, /*IsMin=*/false>(
      rewriter, forOp, partialIteration, previousUb);

  return success();
}

/// Canonicalize min/max operations in the context of for loops with a known
/// range. Call `canonicalizeMinMaxOp` and add the following constraints to
/// the constraint system (along with the missing dimensions):
///
/// * iv >= lb
/// * iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
///
/// Note: Due to limitations of FlatAffineConstraints, only constant step sizes
/// are currently supported.
LogicalResult
mlir::scf::canonicalizeMinMaxOpInLoop(RewriterBase &rewriter, Operation *op,
                                      AffineMap map, ValueRange operands,
                                      bool isMin, LoopMatcherFn loopMatcher) {
  FlatAffineValueConstraints constraints;
  DenseSet<Value> allIvs;

  // Find all iteration variables among `minOp`'s operands and constrain them.
  for (Value operand : operands) {
    // Skip duplicate ivs.
    if (llvm::find(allIvs, operand) != allIvs.end())
      continue;

    // If `operand` is an iteration variable: Find corresponding loop
    // bounds and step.
    Value iv = operand;
    Value lb, ub, step;
    if (failed(loopMatcher(operand, lb, ub, step)))
      continue;
    allIvs.insert(iv);

    // FlatAffineConstraints does not support semi-affine expressions.
    // Therefore, only constant step values are supported.
    auto stepInt = getConstantIntValue(step);
    if (!stepInt)
      continue;

    unsigned dimIv = constraints.appendDimId(iv);
    unsigned dimLb = constraints.appendDimId(lb);
    unsigned dimUb = constraints.appendDimId(ub);

    // If loop lower/upper bounds are constant: Add EQ constraint.
    Optional<int64_t> lbInt = getConstantIntValue(lb);
    Optional<int64_t> ubInt = getConstantIntValue(ub);
    if (lbInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimLb, *lbInt);
    if (ubInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimUb, *ubInt);

    // iv >= lb (equiv.: iv - lb >= 0)
    SmallVector<int64_t> ineqLb(constraints.getNumCols(), 0);
    ineqLb[dimIv] = 1;
    ineqLb[dimLb] = -1;
    constraints.addInequality(ineqLb);
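
    // Worked example for the upper bound added below (numbers are
    // illustrative): with lb = 0, ub = 10, step = 4, the loop visits
    // iv in {0, 4, 8}. The bound evaluates to
    // 0 + 4 * ((10 - 0 - 1) floordiv 4) + 1 = 9, so iv < 9 (i.e., iv <= 8)
    // is the tightest bound on the last iteration.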
    // iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
    AffineExpr exprLb = lbInt ? rewriter.getAffineConstantExpr(*lbInt)
                              : rewriter.getAffineDimExpr(dimLb);
    AffineExpr exprUb = ubInt ? rewriter.getAffineConstantExpr(*ubInt)
                              : rewriter.getAffineDimExpr(dimUb);
    AffineExpr ivUb =
        exprLb + 1 + (*stepInt * ((exprUb - exprLb - 1).floorDiv(*stepInt)));
    // Note: Use a dedicated name to avoid shadowing the `map` parameter, which
    // is used again below.
    auto ivUbMap = AffineMap::get(
        /*dimCount=*/constraints.getNumDimIds(),
        /*symbolCount=*/constraints.getNumSymbolIds(), /*result=*/ivUb);

    if (failed(constraints.addBound(FlatAffineConstraints::UB, dimIv, ivUbMap)))
      return failure();
  }

  return canonicalizeMinMaxOp(rewriter, op, map, operands, isMin, constraints);
}

static constexpr char kPeeledLoopLabel[] = "__peeled_loop__";
static constexpr char kPartialIterationLabel[] = "__partial_iteration__";

namespace {
struct ForLoopPeelingPattern : public OpRewritePattern<ForOp> {
  ForLoopPeelingPattern(MLIRContext *ctx, bool skipPartial)
      : OpRewritePattern<ForOp>(ctx), skipPartial(skipPartial) {}

  LogicalResult matchAndRewrite(ForOp forOp,
                                PatternRewriter &rewriter) const override {
    // Do not peel already peeled loops.
    if (forOp->hasAttr(kPeeledLoopLabel))
      return failure();
    if (skipPartial) {
      // No peeling of loops inside the partial iteration of another peeled
      // loop.
      Operation *op = forOp.getOperation();
      while ((op = op->getParentOfType<scf::ForOp>())) {
        if (op->hasAttr(kPartialIterationLabel))
          return failure();
      }
    }
    // Apply loop peeling.
    scf::ForOp partialIteration;
    if (failed(peelAndCanonicalizeForLoop(rewriter, forOp, partialIteration)))
      return failure();
    // Apply label, so that the same loop is not rewritten a second time.
    partialIteration->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    rewriter.updateRootInPlace(forOp, [&]() {
      forOp->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    });
    partialIteration->setAttr(kPartialIterationLabel, rewriter.getUnitAttr());
    return success();
  }

  /// If set to true, loops inside partial iterations of another peeled loop
  /// are not peeled. This reduces the size of the generated code. Partial
  /// iterations are not usually performance critical.
  /// Note: Takes into account the entire chain of parent operations, not just
  /// the direct parent.
  bool skipPartial;
};
} // namespace
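
// After one successful rewrite, both loops carry __peeled_loop__, and the
// partial iteration additionally carries __partial_iteration__ (a sketch):
//
//   scf.for ... {__peeled_loop__}                        // main loop
//   scf.for ... {__peeled_loop__, __partial_iteration__} // last iteration
//
// so neither loop is matched a second time. With `skipPartial`, loops nested
// anywhere inside the partial iteration are left unpeeled as well.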

namespace {
struct ParallelLoopSpecialization
    : public SCFParallelLoopSpecializationBase<ParallelLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk(
        [](ParallelOp op) { specializeParallelLoopForUnrolling(op); });
  }
};

struct ForLoopSpecialization
    : public SCFForLoopSpecializationBase<ForLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk([](ForOp op) { specializeForLoopForUnrolling(op); });
  }
};

struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

    // Drop the markers.
    funcOp.walk([](Operation *op) {
      op->removeAttr(kPeeledLoopLabel);
      op->removeAttr(kPartialIterationLabel);
    });
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createParallelLoopSpecializationPass() {
  return std::make_unique<ParallelLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopSpecializationPass() {
  return std::make_unique<ForLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopPeelingPass() {
  return std::make_unique<ForLoopPeeling>();
}