//===- LoopSpecialization.cpp - scf.parallel/scf.for specialization ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specializes parallel loops and for loops for easier unrolling and
// vectorization.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/DenseMap.h"

using namespace mlir;
using scf::ForOp;
using scf::ParallelOp;

/// Rewrite a parallel loop with bounds defined by an affine.min with a
/// constant into 2 loops after checking if the bounds are equal to that
/// constant. This is beneficial if the loop will almost always have the
/// constant bound and that version can be fully unrolled and vectorized.
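///
/// A sketch of the rewrite (the tile size 4, the bound 1024 and the value
/// names are made up for illustration):
/// ```
/// %ub = affine.min affine_map<(d0) -> (4, -d0 + 1024)>(%d)
/// scf.parallel (%iv) = (%lb) to (%ub) step (%s) { ... }
/// ```
/// is rewritten into
/// ```
/// %c4 = arith.constant 4 : index
/// %eq = arith.cmpi eq, %ub, %c4 : index
/// scf.if %eq {
///   // Clone with the constant bound; can be fully unrolled/vectorized.
///   scf.parallel (%iv) = (%lb) to (%c4) step (%s) { ... }
/// } else {
///   scf.parallel (%iv) = (%lb) to (%ub) step (%s) { ... }
/// }
/// ```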
static void specializeParallelLoopForUnrolling(ParallelOp op) {
  SmallVector<int64_t, 2> constantIndices;
  constantIndices.reserve(op.upperBound().size());
  for (auto bound : op.upperBound()) {
    auto minOp = bound.getDefiningOp<AffineMinOp>();
    if (!minOp)
      return;
    int64_t minConstant = std::numeric_limits<int64_t>::max();
    for (AffineExpr expr : minOp.map().getResults()) {
      if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
        minConstant = std::min(minConstant, constantIndex.getValue());
    }
    if (minConstant == std::numeric_limits<int64_t>::max())
      return;
    constantIndices.push_back(minConstant);
  }

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value cond;
  for (auto bound : llvm::zip(op.upperBound(), constantIndices)) {
    Value constant =
        b.create<arith::ConstantIndexOp>(op.getLoc(), std::get<1>(bound));
    Value cmp = b.create<arith::CmpIOp>(op.getLoc(), arith::CmpIPredicate::eq,
                                        std::get<0>(bound), constant);
    cond = cond ? b.create<arith::AndIOp>(op.getLoc(), cond, cmp) : cmp;
    map.map(std::get<0>(bound), constant);
  }
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds defined by an affine.min with a constant
/// into 2 loops after checking if the bounds are equal to that constant. This
/// is beneficial if the loop will almost always have the constant bound and
/// that version can be fully unrolled and vectorized.
static void specializeForLoopForUnrolling(ForOp op) {
  auto bound = op.upperBound();
  auto minOp = bound.getDefiningOp<AffineMinOp>();
  if (!minOp)
    return;
  int64_t minConstant = std::numeric_limits<int64_t>::max();
  for (AffineExpr expr : minOp.map().getResults()) {
    if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
      minConstant = std::min(minConstant, constantIndex.getValue());
  }
  if (minConstant == std::numeric_limits<int64_t>::max())
    return;

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value constant = b.create<arith::ConstantIndexOp>(op.getLoc(), minConstant);
  Value cond = b.create<arith::CmpIOp>(op.getLoc(), arith::CmpIPredicate::eq,
                                       bound, constant);
  map.map(bound, constant);
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds/step that potentially do not divide evenly
/// into a for loop where the step divides the iteration space evenly, followed
/// by another scf.for for the last (partial) iteration (if any).
///
/// This function rewrites the given scf.for loop in-place and creates a new
/// scf.for operation for the last iteration. It replaces all uses of the
/// unpeeled loop with the results of the newly generated scf.for.
///
/// The newly generated scf.for operation is returned via `partialIteration`.
/// The boundary at which the loop is split (new upper bound) is returned via
/// `splitBound`. The return value indicates whether the loop was rewritten or
/// not.
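///
/// A sketch of the transformation (`%c0`, `%c4` and `%ub` are made-up names;
/// the split bound is what `modMap` in the implementation below computes):
/// ```
/// scf.for %iv = %c0 to %ub step %c4 { ... }
/// ```
/// is rewritten into
/// ```
/// %split = affine.apply affine_map<()[s0, s1, s2]
///     -> (s1 - ((s1 - s0) mod s2))>()[%c0, %ub, %c4]
/// scf.for %iv = %c0 to %split step %c4 { ... }  // peeled loop
/// scf.for %iv = %split to %ub step %c4 { ... }  // partial iteration
/// ```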
static LogicalResult peelForLoop(RewriterBase &b, ForOp forOp,
                                 ForOp &partialIteration, Value &splitBound) {
  RewriterBase::InsertionGuard guard(b);
  auto lbInt = getConstantIntValue(forOp.lowerBound());
  auto ubInt = getConstantIntValue(forOp.upperBound());
  auto stepInt = getConstantIntValue(forOp.step());

  // No specialization necessary if step already divides upper bound evenly.
  if (lbInt && ubInt && stepInt && (*ubInt - *lbInt) % *stepInt == 0)
    return failure();
  // No specialization necessary if step size is 1.
  if (stepInt == static_cast<int64_t>(1))
    return failure();

  auto loc = forOp.getLoc();
  AffineExpr sym0, sym1, sym2;
  bindSymbols(b.getContext(), sym0, sym1, sym2);
  // New upper bound: %ub - (%ub - %lb) mod %step
  auto modMap = AffineMap::get(0, 3, {sym1 - ((sym1 - sym0) % sym2)});
  b.setInsertionPoint(forOp);
  splitBound = b.createOrFold<AffineApplyOp>(
      loc, modMap,
      ValueRange{forOp.lowerBound(), forOp.upperBound(), forOp.step()});

  // Create ForOp for partial iteration.
  b.setInsertionPointAfter(forOp);
  partialIteration = cast<ForOp>(b.clone(*forOp.getOperation()));
  partialIteration.lowerBoundMutable().assign(splitBound);
  forOp.replaceAllUsesWith(partialIteration->getResults());
  partialIteration.initArgsMutable().assign(forOp->getResults());

  // Set new upper loop bound.
  b.updateRootInPlace(forOp,
                      [&]() { forOp.upperBoundMutable().assign(splitBound); });

  return success();
}

static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
                                 SmallVector<Value> &target) {
  target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
    return val.hasValue() ? *val : Value();
  }));
}

/// Bound an identifier `pos` in a given FlatAffineValueConstraints with
/// constraints drawn from an affine map. Before adding the constraint, the
/// dimensions/symbols of the affine map are aligned with `constraints`.
/// `operands` are the SSA Value operands used with the affine map.
/// Note: This function adds a new symbol column to the `constraints` for each
/// dimension/symbol that exists in the affine map but not in `constraints`.
static LogicalResult alignAndAddBound(FlatAffineValueConstraints &constraints,
                                      FlatAffineConstraints::BoundType type,
                                      unsigned pos, AffineMap map,
                                      ValueRange operands) {
  SmallVector<Value> dims, syms, newSyms;
  unpackOptionalValues(constraints.getMaybeDimValues(), dims);
  unpackOptionalValues(constraints.getMaybeSymbolValues(), syms);

  AffineMap alignedMap =
      alignAffineMapWithValues(map, operands, dims, syms, &newSyms);
  for (unsigned i = syms.size(); i < newSyms.size(); ++i)
    constraints.appendSymbolId(newSyms[i]);
  return constraints.addBound(type, pos, alignedMap);
}

/// Add `val` to each result of `map`.
static AffineMap addConstToResults(AffineMap map, int64_t val) {
  SmallVector<AffineExpr> newResults;
  for (AffineExpr r : map.getResults())
    newResults.push_back(r + val);
  return AffineMap::get(map.getNumDims(), map.getNumSymbols(), newResults,
                        map.getContext());
}

/// This function tries to canonicalize min/max operations by proving that
/// their value is bounded by the same lower and upper bound. In that case, the
/// operation can be folded away.
///
/// Bounds are computed by FlatAffineValueConstraints. Invariants required for
/// finding/proving bounds should be supplied via `constraints`.
///
/// 1. Add dimensions for `op` and `opBound` (lower or upper bound of `op`).
/// 2. Compute an upper bound of `op` (in case of `isMin`) or a lower bound (in
///    case of `!isMin`) and bind it to `opBound`. SSA values that are used in
///    `op` but are not part of `constraints` are added as extra symbols.
/// 3. For each result of `op`: Add result as a dimension `r_i`. Prove that:
///    * If `isMin`: r_i >= opBound
///    * If `!isMin`: r_i <= opBound
///    If this is the case, ub(op) == lb(op).
/// 4. Replace `op` with `opBound`.
///
/// In summary, the following constraints are added throughout this function.
/// Note: `invar` are dimensions added by the caller to express the invariants.
/// (Showing only the case where `isMin`.)
///
///  invar |    op | opBound | r_i | extra syms... | const | eq/ineq
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (various eq./ineq. constraining `invar`, added by the caller)
///    ... |     0 |       0 |   0 |             0 |   ... | ...
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (various ineq. constraining `op` in terms of `op` operands (`invar` and
///    extra `op` operands "extra syms" that are not in `invar`)).
///    ... |    -1 |       0 |   0 |           ... |   ... | >= 0
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (set `opBound` to `op` upper bound in terms of `invar` and "extra syms")
///    ... |     0 |      -1 |   0 |           ... |   ... | = 0
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (for each `op` map result r_i: set r_i to corresponding map result,
///    prove that r_i >= minOpUb via contradiction)
///    ... |     0 |       0 |  -1 |           ... |   ... | = 0
///      0 |     0 |       1 |  -1 |             0 |    -1 | >= 0
///
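/// As a small worked instance (using the loop peeling invariant supplied by
/// `rewritePeeledMinMaxOp` below): for `op = min(step, ub - iv)` with the
/// invariant `ub - iv >= step`, step 2 can derive `opBound = step`, and step 3
/// proves `step >= opBound` and `ub - iv >= opBound` (the invariant itself),
/// so `op` folds to `step`.
///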
static LogicalResult
canonicalizeMinMaxOp(RewriterBase &rewriter, Operation *op, AffineMap map,
                     ValueRange operands, bool isMin,
                     FlatAffineValueConstraints constraints) {
  RewriterBase::InsertionGuard guard(rewriter);
  unsigned numResults = map.getNumResults();

  // Add a few extra dimensions.
  unsigned dimOp = constraints.appendDimId();      // `op`
  unsigned dimOpBound = constraints.appendDimId(); // `op` lower/upper bound
  unsigned resultDimStart = constraints.appendDimId(/*num=*/numResults);

  // Add an inequality for each result expr_i of map:
  // isMin: op <= expr_i, !isMin: op >= expr_i
  auto boundType =
      isMin ? FlatAffineConstraints::UB : FlatAffineConstraints::LB;
  // Upper bounds are exclusive, so add 1. (`affine.min` ops are inclusive.)
  AffineMap mapLbUb = isMin ? addConstToResults(map, 1) : map;
  if (failed(
          alignAndAddBound(constraints, boundType, dimOp, mapLbUb, operands)))
    return failure();

  // Try to compute a lower/upper bound for op, expressed in terms of the other
  // `dims` and extra symbols.
  SmallVector<AffineMap> opLb(1), opUb(1);
  constraints.getSliceBounds(dimOp, 1, rewriter.getContext(), &opLb, &opUb);
  AffineMap sliceBound = isMin ? opUb[0] : opLb[0];
  // TODO: `getSliceBounds` may return multiple bounds at the moment. This is
  // a TODO of `getSliceBounds` and not handled here.
  if (!sliceBound || sliceBound.getNumResults() != 1)
    return failure(); // No or multiple bounds found.
  // Recover the inclusive UB in the case of an `affine.min`.
  AffineMap boundMap = isMin ? addConstToResults(sliceBound, -1) : sliceBound;

  // Add an equality: Set dimOpBound to computed bound.
  // Add back dimension for op. (Was removed by `getSliceBounds`.)
  AffineMap alignedBoundMap = boundMap.shiftDims(/*shift=*/1, /*offset=*/dimOp);
  if (failed(constraints.addBound(FlatAffineConstraints::EQ, dimOpBound,
                                  alignedBoundMap)))
    return failure();

  // If the constraint system is empty, there is an inconsistency. (E.g., this
  // can happen if loop lb > ub.)
  if (constraints.isEmpty())
    return failure();

  // In the case of `isMin` (the `!isMin` case is symmetric):
  // Prove that each result of `map` has a lower bound that is equal to (or
  // greater than) the upper bound of `op` (`dimOpBound`). In that case, `op`
  // can be replaced with the bound. I.e., prove that for each result
  // expr_i (represented by dimension r_i):
  //
  // r_i >= opBound
  //
  // To prove this inequality, add its negation to the constraint set and prove
  // that the constraint set is empty.
  for (unsigned i = resultDimStart; i < resultDimStart + numResults; ++i) {
    FlatAffineValueConstraints newConstr(constraints);

    // Add an equality: r_i = expr_i
    // Note: These equalities could have been added earlier and used to express
    // minOp <= expr_i. However, then we run the risk that `getSliceBounds`
    // computes minOpUb in terms of r_i dims, which is not desired.
    if (failed(alignAndAddBound(newConstr, FlatAffineConstraints::EQ, i,
                                map.getSubMap({i - resultDimStart}), operands)))
      return failure();

    // If `isMin`: Add inequality: r_i < opBound
    //             equiv.: opBound - r_i - 1 >= 0
    // If `!isMin`: Add inequality: r_i > opBound
    //              equiv.: -opBound + r_i - 1 >= 0
    SmallVector<int64_t> ineq(newConstr.getNumCols(), 0);
    ineq[dimOpBound] = isMin ? 1 : -1;
    ineq[i] = isMin ? -1 : 1;
    ineq[newConstr.getNumCols() - 1] = -1;
    newConstr.addInequality(ineq);
    if (!newConstr.isEmpty())
      return failure();
  }

  // Lower and upper bound of `op` are equal. Replace `op` with its bound.
  AffineMap newMap = alignedBoundMap;
  SmallVector<Value> newOperands;
  unpackOptionalValues(constraints.getMaybeDimAndSymbolValues(), newOperands);
  // If dims/symbols have known constant values, use those in order to simplify
  // the affine map further.
  for (unsigned i = 0, e = constraints.getNumDimAndSymbolIds(); i < e; ++i) {
    // Skip unused operands and operands that are already constants.
    if (!newOperands[i] || getConstantIntValue(newOperands[i]))
      continue;
    if (auto bound = constraints.getConstantBound(FlatAffineConstraints::EQ, i))
      newOperands[i] =
          rewriter.create<arith::ConstantIndexOp>(op->getLoc(), *bound);
  }
  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
  rewriter.setInsertionPoint(op);
  rewriter.replaceOpWithNewOp<AffineApplyOp>(op, newMap, newOperands);
  return success();
}

/// Try to simplify a min/max operation `op` after loop peeling. This function
/// can simplify min/max operations such as (ub is the previous upper bound of
/// the unpeeled loop):
/// ```
/// #map = affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>
/// %r = affine.min #map(%iv)[%step, %ub]
/// ```
/// and rewrites them into (in the case of the peeled loop):
/// ```
/// %r = %step
/// ```
/// min/max operations inside the partial iteration are rewritten in a similar
/// way.
///
/// This function builds up a set of constraints, capable of proving that:
/// * Inside the peeled loop: min(step, ub - iv) == step
/// * Inside the partial iteration: min(step, ub - iv) == ub - iv
///
/// Returns `success` if the given operation was replaced by a new operation;
/// `failure` otherwise.
///
/// Note: `ub` is the previous upper bound of the loop (before peeling).
/// `insideLoop` must be true for min/max ops inside the loop and false for
/// affine.min ops inside the partial iteration. For an explanation of the
/// other parameters, see comment of `canonicalizeMinMaxOpInLoop`.
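///
/// As a numeric illustration (`lb = 0`, `ub = 10`, `step = 4` are made-up
/// values): the split bound is 10 - (10 mod 4) = 8. Inside the peeled loop,
/// `iv` is 0 or 4, so `ub - iv >= step` and min(step, ub - iv) == step == 4.
/// Inside the partial iteration, `iv` is the split bound 8, so
/// `ub - iv == 2 < step` and min(step, ub - iv) == ub - iv.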
LogicalResult mlir::scf::rewritePeeledMinMaxOp(RewriterBase &rewriter,
                                               Operation *op, AffineMap map,
                                               ValueRange operands, bool isMin,
                                               Value iv, Value ub, Value step,
                                               bool insideLoop) {
  FlatAffineValueConstraints constraints;
  constraints.appendDimId({iv, ub, step});
  if (auto constUb = getConstantIntValue(ub))
    constraints.addBound(FlatAffineConstraints::EQ, 1, *constUb);
  if (auto constStep = getConstantIntValue(step))
    constraints.addBound(FlatAffineConstraints::EQ, 2, *constStep);

  // Add loop peeling invariant. This is the main piece of knowledge that
  // enables AffineMinOp simplification.
  if (insideLoop) {
    // ub - iv >= step (equiv.: -iv + ub - step + 0 >= 0)
    // Intuitively: Inside the peeled loop, every iteration is a "full"
    // iteration, i.e., step divides the iteration space `ub - lb` evenly.
    constraints.addInequality({-1, 1, -1, 0});
  } else {
    // ub - iv < step (equiv.: iv - ub + step - 1 >= 0)
    // Intuitively: `iv` is the split bound here, i.e., the iteration variable
    // value of the very last iteration (in the unpeeled loop). At that point,
    // there are less than `step` elements remaining. (Otherwise, the peeled
    // loop would run for at least one more iteration.)
    constraints.addInequality({1, -1, 1, -1});
  }

  return canonicalizeMinMaxOp(rewriter, op, map, operands, isMin, constraints);
}

template <typename OpTy, bool IsMin>
static void rewriteAffineOpAfterPeeling(RewriterBase &rewriter, ForOp forOp,
                                        ForOp partialIteration,
                                        Value previousUb) {
  Value mainIv = forOp.getInductionVar();
  Value partialIv = partialIteration.getInductionVar();
  assert(forOp.step() == partialIteration.step() &&
         "expected same step in main and partial loop");
  Value step = forOp.step();

  forOp.walk([&](OpTy affineOp) {
    AffineMap map = affineOp.getAffineMap();
    (void)scf::rewritePeeledMinMaxOp(rewriter, affineOp, map,
                                     affineOp.operands(), IsMin, mainIv,
                                     previousUb, step,
                                     /*insideLoop=*/true);
  });
  partialIteration.walk([&](OpTy affineOp) {
    AffineMap map = affineOp.getAffineMap();
    (void)scf::rewritePeeledMinMaxOp(rewriter, affineOp, map,
                                     affineOp.operands(), IsMin, partialIv,
                                     previousUb, step, /*insideLoop=*/false);
  });
}

LogicalResult mlir::scf::peelAndCanonicalizeForLoop(RewriterBase &rewriter,
                                                    ForOp forOp,
                                                    ForOp &partialIteration) {
  Value previousUb = forOp.upperBound();
  Value splitBound;
  if (failed(peelForLoop(rewriter, forOp, partialIteration, splitBound)))
    return failure();

  // Rewrite affine.min and affine.max ops.
  rewriteAffineOpAfterPeeling<AffineMinOp, /*IsMin=*/true>(
      rewriter, forOp, partialIteration, previousUb);
  rewriteAffineOpAfterPeeling<AffineMaxOp, /*IsMin=*/false>(
      rewriter, forOp, partialIteration, previousUb);

  return success();
}

/// Canonicalize min/max operations in the context of for loops with a known
/// range. Call `canonicalizeMinMaxOp` and add the following constraints to
/// the constraint system (along with the missing dimensions):
///
/// * iv >= lb
/// * iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
///
/// Note: Due to limitations of FlatAffineConstraints, only constant step sizes
/// are currently supported.
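///
/// For example (made-up constants): with `lb = 0`, `ub = 10` and `step = 4`,
/// the added constraints are iv >= 0 and
/// iv < 0 + 4 * ((10 - 0 - 1) floorDiv 4) + 1 = 9, i.e., iv is one of 0, 4, 8.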
LogicalResult
mlir::scf::canonicalizeMinMaxOpInLoop(RewriterBase &rewriter, Operation *op,
                                      AffineMap map, ValueRange operands,
                                      bool isMin, LoopMatcherFn loopMatcher) {
  FlatAffineValueConstraints constraints;
  DenseSet<Value> allIvs;

  // Find all iteration variables among `op`'s operands and constrain them.
  for (Value operand : operands) {
    // Skip duplicate ivs.
    if (llvm::find(allIvs, operand) != allIvs.end())
      continue;

    // If `operand` is an iteration variable: Find corresponding loop
    // bounds and step.
    Value iv = operand;
    Value lb, ub, step;
    if (failed(loopMatcher(operand, lb, ub, step)))
      continue;
    allIvs.insert(iv);

    // FlatAffineConstraints does not support semi-affine expressions.
    // Therefore, only constant step values are supported.
    auto stepInt = getConstantIntValue(step);
    if (!stepInt)
      continue;

    unsigned dimIv = constraints.appendDimId(iv);
    unsigned dimLb = constraints.appendDimId(lb);
    unsigned dimUb = constraints.appendDimId(ub);

    // If loop lower/upper bounds are constant: Add EQ constraint.
    Optional<int64_t> lbInt = getConstantIntValue(lb);
    Optional<int64_t> ubInt = getConstantIntValue(ub);
    if (lbInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimLb, *lbInt);
    if (ubInt)
      constraints.addBound(FlatAffineConstraints::EQ, dimUb, *ubInt);

    // Lower bound: iv >= lb (equiv.: iv - lb >= 0)
    SmallVector<int64_t> ineqLb(constraints.getNumCols(), 0);
    ineqLb[dimIv] = 1;
    ineqLb[dimLb] = -1;
    constraints.addInequality(ineqLb);

    // Upper bound
    AffineExpr ivUb;
    if (lbInt && ubInt && (*lbInt + *stepInt >= *ubInt)) {
      // The loop has at most one iteration.
      // iv < lb + 1
      // TODO: Try to derive this constraint by simplifying the expression in
      // the else-branch.
      ivUb = rewriter.getAffineDimExpr(dimLb) + 1;
    } else {
      // The loop may have more than one iteration.
      // iv < lb + step * ((ub - lb - 1) floorDiv step) + 1
      AffineExpr exprLb = lbInt ? rewriter.getAffineConstantExpr(*lbInt)
                                : rewriter.getAffineDimExpr(dimLb);
      AffineExpr exprUb = ubInt ? rewriter.getAffineConstantExpr(*ubInt)
                                : rewriter.getAffineDimExpr(dimUb);
      ivUb =
          exprLb + 1 + (*stepInt * ((exprUb - exprLb - 1).floorDiv(*stepInt)));
    }
    // Note: Use a name that does not shadow the `map` parameter.
    auto ivUbMap = AffineMap::get(
        /*dimCount=*/constraints.getNumDimIds(),
        /*symbolCount=*/constraints.getNumSymbolIds(), /*result=*/ivUb);

    if (failed(constraints.addBound(FlatAffineConstraints::UB, dimIv, ivUbMap)))
      return failure();
  }

  return canonicalizeMinMaxOp(rewriter, op, map, operands, isMin, constraints);
}

static constexpr char kPeeledLoopLabel[] = "__peeled_loop__";
static constexpr char kPartialIterationLabel[] = "__partial_iteration__";

namespace {
struct ForLoopPeelingPattern : public OpRewritePattern<ForOp> {
  ForLoopPeelingPattern(MLIRContext *ctx, bool skipPartial)
      : OpRewritePattern<ForOp>(ctx), skipPartial(skipPartial) {}

  LogicalResult matchAndRewrite(ForOp forOp,
                                PatternRewriter &rewriter) const override {
    // Do not peel already peeled loops.
    if (forOp->hasAttr(kPeeledLoopLabel))
      return failure();
    if (skipPartial) {
      // No peeling of loops inside the partial iteration of another peeled
      // loop.
      Operation *op = forOp.getOperation();
      while ((op = op->getParentOfType<scf::ForOp>())) {
        if (op->hasAttr(kPartialIterationLabel))
          return failure();
      }
    }
    // Apply loop peeling.
    scf::ForOp partialIteration;
    if (failed(peelAndCanonicalizeForLoop(rewriter, forOp, partialIteration)))
      return failure();
    // Apply label, so that the same loop is not rewritten a second time.
    partialIteration->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    rewriter.updateRootInPlace(forOp, [&]() {
      forOp->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    });
    partialIteration->setAttr(kPartialIterationLabel, rewriter.getUnitAttr());
    return success();
  }

  /// If set to true, loops inside partial iterations of another peeled loop
  /// are not peeled. This reduces the size of the generated code. Partial
  /// iterations are not usually performance critical.
  /// Note: Takes into account the entire chain of parent operations, not just
  /// the direct parent.
  bool skipPartial;
};
} // namespace
namespace {
struct ParallelLoopSpecialization
    : public SCFParallelLoopSpecializationBase<ParallelLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk(
        [](ParallelOp op) { specializeParallelLoopForUnrolling(op); });
  }
};

struct ForLoopSpecialization
    : public SCFForLoopSpecializationBase<ForLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk([](ForOp op) { specializeForLoopForUnrolling(op); });
  }
};

struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

    // Drop the markers.
    funcOp.walk([](Operation *op) {
      op->removeAttr(kPeeledLoopLabel);
      op->removeAttr(kPartialIterationLabel);
    });
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createParallelLoopSpecializationPass() {
  return std::make_unique<ParallelLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopSpecializationPass() {
  return std::make_unique<ForLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopPeelingPass() {
  return std::make_unique<ForLoopPeeling>();
}
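
// A minimal usage sketch (not part of this file's API; `context` and `module`
// are placeholders) for scheduling the passes created above on the functions
// of a module:
//
//   PassManager pm(&context);
//   pm.addNestedPass<FuncOp>(mlir::createForLoopPeelingPass());
//   if (failed(pm.run(module)))
//     /* handle the error */;
//
// The peeling pass itself drives `ForLoopPeelingPattern` greedily and then
// strips the internal labels, as shown in `ForLoopPeeling::runOnFunction`.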