//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges.  It does that in a way such that the middle loop
// provably does not need range checks.  As an example, it will convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"

using namespace llvm;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
                                          cl::Hidden, cl::init(10));

static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
                                             cl::Hidden, cl::init(false));

static const char *ClonedLoopTag = "irce.loop.clone";

#define DEBUG_TYPE "irce"

namespace {

/// An inductive range check is a conditional branch in a loop with
///
///  1. a very cold successor (i.e. the branch jumps to that successor very
///     rarely)
///
///  and
///
///  2. a condition that is provably true for some contiguous range of values
///     taken by the containing loop's induction variable.
///
class InductiveRangeCheck {
  // Classifies a range check
  enum RangeCheckKind : unsigned {
    // Range check of the form "0 <= I".
    RANGE_CHECK_LOWER = 1,

    // Range check of the form "I < L" where L is known positive.
    RANGE_CHECK_UPPER = 2,

    // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
    // conditions.
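    // Note: the enumerator values are chosen so that this is the bitwise OR
    // of RANGE_CHECK_LOWER and RANGE_CHECK_UPPER; extractRangeChecksFromCond
    // relies on that when it merges a lower and an upper check on the same
    // subscript into one check.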
    RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,

    // Unrecognized range check condition.
    RANGE_CHECK_UNKNOWN = (unsigned)-1
  };

  static StringRef rangeCheckKindToStr(RangeCheckKind);

  const SCEV *Offset = nullptr;
  const SCEV *Scale = nullptr;
  Value *Length = nullptr;
  Use *CheckUse = nullptr;
  RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;

  static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                            ScalarEvolution &SE, Value *&Index,
                                            Value *&Length);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

public:
  const SCEV *getOffset() const { return Offset; }
  const SCEV *getScale() const { return Scale; }
  Value *getLength() const { return Length; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n";
    OS << " Offset: ";
    Offset->print(OS);
    OS << " Scale: ";
    Scale->print(OS);
    OS << " Length: ";
    if (Length)
      Length->print(OS);
    else
      OS << "(null)";
    OS << "\n CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    print(dbgs());
  }
#endif

  Use *getCheckUse() const { return CheckUse; }

  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()).  If
  /// R.getEnd() sle R.getBegin(), then R denotes the empty range.

  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
  };

  /// This is the value the condition of the branch needs to evaluate to for
  /// the branch to take the hot successor (see (1) above).
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away.  The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                            const SCEVAddRecExpr *IndVar) const;

  /// Parse out a set of inductive range checks from \p BI and append them to
  /// \p Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that aren't inductive
  /// range checks, and hence don't end up in \p Checks.
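  /// For example, if \p BI branches on something like
  /// `%cond = and i1 %in.bounds, %other`, only the operands of the `and` that
  /// parse as affine range checks on this loop's induction variable are
  /// appended; the unrecognized operand is simply skipped.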
  static void
  extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE,
                               BranchProbabilityInfo &BPI,
                               SmallVectorImpl<InductiveRangeCheck> &Checks);
};

class InductiveRangeCheckElimination : public LoopPass {
public:
  static char ID;
  InductiveRangeCheckElimination() : LoopPass(ID) {
    initializeInductiveRangeCheckEliminationPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
};

char InductiveRangeCheckElimination::ID = 0;
}

INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
                      "Inductive range check elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce",
                    "Inductive range check elimination", false, false)

StringRef InductiveRangeCheck::rangeCheckKindToStr(
    InductiveRangeCheck::RangeCheckKind RCK) {
  switch (RCK) {
  case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
    return "RANGE_CHECK_UNKNOWN";

  case InductiveRangeCheck::RANGE_CHECK_UPPER:
    return "RANGE_CHECK_UPPER";

  case InductiveRangeCheck::RANGE_CHECK_LOWER:
    return "RANGE_CHECK_LOWER";

  case InductiveRangeCheck::RANGE_CHECK_BOTH:
    return "RANGE_CHECK_BOTH";
  }

  llvm_unreachable("unknown range check type!");
}

/// Parse a single ICmp instruction, `ICI`, into a range check.  If `ICI`
/// cannot be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and
/// set `Index` and `Length` to `nullptr`.  Otherwise set `Index` to the value
/// being range checked, and set `Length` to the upper limit `Index` is being
/// range checked with if (and only if) the range check type is
/// RANGE_CHECK_UPPER or stronger.
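///
/// For example, `icmp slt i32 %i, %len`, where `%len` is loop-invariant and
/// known non-negative, parses as a RANGE_CHECK_UPPER with `Index` = `%i` and
/// `Length` = `%len`, while `icmp sge i32 %i, 0` parses as a RANGE_CHECK_LOWER
/// with `Index` = `%i` and no `Length`.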
///
InductiveRangeCheck::RangeCheckKind
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                         ScalarEvolution &SE, Value *&Index,
                                         Value *&Length) {

  auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
    const SCEV *S = SE.getSCEV(V);
    if (isa<SCEVCouldNotCompute>(S))
      return false;

    return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
           SE.isKnownNonNegative(S);
  };

  using namespace llvm::PatternMatch;

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (Pred) {
  default:
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }

    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_UPPER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_ULT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_BOTH;
    }
    return RANGE_CHECK_UNKNOWN;
  }

  llvm_unreachable("default clause returns!");
}

void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  using namespace llvm::PatternMatch;

  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  if (match(Condition, m_And(m_Value(), m_Value()))) {
    SmallVector<InductiveRangeCheck, 8> SubChecks;
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
                               SubChecks, Visited);
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
                               SubChecks, Visited);

    if (SubChecks.size() == 2) {
      // Handle a special case where we know how to merge two checks separately
      // checking the upper and lower bounds into a full range check.
      const auto &RChkA = SubChecks[0];
      const auto &RChkB = SubChecks[1];
      if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
          RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {

        // If RChkA.Kind == RChkB.Kind then we just found two identical checks.
        // But if one of them is a RANGE_CHECK_LOWER and the other is a
        // RANGE_CHECK_UPPER (only possibility if they're different) then
        // together they form a RANGE_CHECK_BOTH.
        SubChecks[0].Kind =
            (InductiveRangeCheck::RangeCheckKind)(RChkA.Kind | RChkB.Kind);
        SubChecks[0].Length = RChkA.Length ? RChkA.Length : RChkB.Length;
        SubChecks[0].CheckUse = &ConditionUse;

        // We updated one of the checks in place, now erase the other.
        SubChecks.pop_back();
      }
    }

    Checks.insert(Checks.end(), SubChecks.begin(), SubChecks.end());
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  Value *Length = nullptr, *Index;
  auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length);
  if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
    return;

  const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
  bool IsAffineIndex =
      IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();

  if (!IsAffineIndex)
    return;

  InductiveRangeCheck IRC;
  IRC.Length = Length;
  IRC.Offset = IndexAddRec->getStart();
  IRC.Scale = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  IRC.Kind = RCKind;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
    SmallVectorImpl<InductiveRangeCheck> &Checks) {

  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  BranchProbability LikelyTaken(15, 16);

  if (!SkipProfitabilityChecks &&
      BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken)
    return;

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

namespace {

// Keeps track of the structure of a loop.  This is similar to llvm::Loop,
// except that it is more lightweight and can track the state of a loop through
// changing and potentially invalid IR.  This structure also formalizes the
// kinds of loops we can deal with -- ones that have a single latch that is
// also an exiting block *and* have a canonical induction variable.
struct LoopStructure {
  const char *Tag;

  BasicBlock *Header;
  BasicBlock *Latch;

  // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
  // successor is `LatchExit', the exit block of the loop.
  BranchInst *LatchBr;
  BasicBlock *LatchExit;
  unsigned LatchBrExitIdx;

  Value *IndVarNext;
  Value *IndVarStart;
  Value *LoopExitAt;
  bool IndVarIncreasing;

  LoopStructure()
      : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
        LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
        IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}

  template <typename M> LoopStructure map(M Map) const {
    LoopStructure Result;
    Result.Tag = Tag;
    Result.Header = cast<BasicBlock>(Map(Header));
    Result.Latch = cast<BasicBlock>(Map(Latch));
    Result.LatchBr = cast<BranchInst>(Map(LatchBr));
    Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
    Result.LatchBrExitIdx = LatchBrExitIdx;
    Result.IndVarNext = Map(IndVarNext);
    Result.IndVarStart = Map(IndVarStart);
    Result.LoopExitAt = Map(LoopExitAt);
    Result.IndVarIncreasing = IndVarIncreasing;
    return Result;
  }

  static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
                                                    BranchProbabilityInfo &BPI,
                                                    Loop &,
                                                    const char *&);
};

/// This class is used to constrain loops to run within a given iteration space.
/// The algorithm this class implements is given a Loop and a range [Begin, End).
/// The algorithm then tries to break out a "main loop" out of the loop it is
/// given in a way that the "main loop" runs with the induction variable in a
/// subset of [Begin, End).  The algorithm emits appropriate pre and post loops
/// to run any remaining iterations.  The pre loop runs any iterations in which
/// the induction variable is < Begin, and the post loop runs any iterations in
/// which the induction variable is >= End.
///
class LoopConstrainer {
  // The representation of a clone of the original loop we started out with.
  struct ClonedLoop {
    // The cloned blocks
    std::vector<BasicBlock *> Blocks;

    // `Map` maps values in the clonee into values in the cloned version
    ValueToValueMapTy Map;

    // An instance of `LoopStructure` for the cloned loop
    LoopStructure Structure;
  };

  // Result of rewriting the range of a loop.  See changeIterationSpaceEnd for
  // more details on what these fields mean.
  struct RewrittenRangeInfo {
    BasicBlock *PseudoExit;
    BasicBlock *ExitSelector;
    std::vector<PHINode *> PHIValuesAtPseudoExit;
    PHINode *IndVarEnd;

    RewrittenRangeInfo()
        : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
  };

  // Calculated subranges we restrict the iteration space of the main loop to.
  // See the implementation of `calculateSubRanges' for more details on how
  // these fields are computed.  `LowLimit` is None if there is no restriction
  // on the low end of the restricted iteration space of the main loop.
  // `HighLimit` is None if there is no restriction on the high end of the
  // restricted iteration space of the main loop.

  struct SubRanges {
    Optional<const SCEV *> LowLimit;
    Optional<const SCEV *> HighLimit;
  };

  // A utility function that does a `replaceUsesOfWith' on the incoming block
  // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
  // incoming block list with `ReplaceBy'.
  static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
                              BasicBlock *ReplaceBy);

  // Compute a safe set of limits for the main loop to run in -- effectively the
  // intersection of `Range' and the iteration space of the original loop.
  // Return None if unable to compute the set of subranges.
  //
  Optional<SubRanges> calculateSubRanges() const;

  // Clone `OriginalLoop' and return the result in CLResult.  The IR after
  // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
  // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
  // but there is no such edge.
  //
  void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;

  // Create the appropriate loop structure needed to describe a cloned copy of
  // `Original`.  The clone is described by `VM`.
  Loop *createClonedLoopStructure(Loop *Original, Loop *Parent,
                                  ValueToValueMapTy &VM);

  // Rewrite the iteration space of the loop denoted by (LS, Preheader).  The
  // iteration space of the rewritten loop ends at ExitLoopAt.  The start of
  // the iteration space is not changed.  `ExitLoopAt' is assumed to be slt
  // `OriginalHeaderCount'.
  //
  // If there are iterations left to execute, control jumps to
  // `ContinuationBlock'; otherwise the normal loop exit is taken.
  // The returned `RewrittenRangeInfo' object is populated as follows:
  //
  //  .PseudoExit is a basic block that unconditionally branches to
  //  `ContinuationBlock'.
  //
  //  .ExitSelector is a basic block that decides, on exit from the loop,
  //  whether to branch to the "true" exit or to `PseudoExit'.
  //
  //  .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the
  //  value for each PHINode in the loop header on taking the pseudo exit.
  //
  // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
  // preheader because it is made to branch to the loop header only
  // conditionally.
  //
  RewrittenRangeInfo
  changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
                          Value *ExitLoopAt,
                          BasicBlock *ContinuationBlock) const;

  // The loop denoted by `LS' has `OldPreheader' as its preheader.  This
  // function creates a new preheader for `LS' and returns it.
  //
  BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
                              const char *Tag) const;

  // `ContinuationBlockAndPreheader' was the continuation block for some call
  // to `changeIterationSpaceEnd' and is the preheader to the loop denoted by
  // `LS'.  This function rewrites the PHI nodes in `LS.Header' to start with
  // the correct value.
  void rewriteIncomingValuesForPHIs(
      LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
      const LoopConstrainer::RewrittenRangeInfo &RRI) const;

  // Even though we do not preserve any passes at this time, we at least need
  // to keep the parent loop structure consistent.  The `LPPassManager' seems
  // to verify this after running a loop pass.  This function adds the list of
  // blocks denoted by BBs to this loop's parent loop if required.
  void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);

  // Some global state.
  Function &F;
  LLVMContext &Ctx;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LPPassManager &LPM;
  LoopInfo &LI;

  // Information about the original loop we started out with.
  Loop &OriginalLoop;
  const SCEV *LatchTakenCount;
  BasicBlock *OriginalPreheader;

  // The preheader of the main loop.  This may or may not be different from
  // `OriginalPreheader'.
  BasicBlock *MainLoopPreheader;

  // The range we need to run the main loop in.
  InductiveRangeCheck::Range Range;

  // The structure of the main loop (see comment at the beginning of this class
  // for a definition)
  LoopStructure MainLoopStructure;

public:
  LoopConstrainer(Loop &L, LoopInfo &LI, LPPassManager &LPM,
                  const LoopStructure &LS, ScalarEvolution &SE,
                  DominatorTree &DT, InductiveRangeCheck::Range R)
      : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
        SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L),
        LatchTakenCount(nullptr), OriginalPreheader(nullptr),
        MainLoopPreheader(nullptr), Range(R), MainLoopStructure(LS) {}

  // Entry point for the algorithm.  Returns true on success.
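  // When this returns false the control flow of the original loop is left
  // unchanged, though some straight-line (dead) SCEV expansions may have been
  // emitted into the original preheader.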
  bool run();
};

}

void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
                                      BasicBlock *ReplaceBy) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingBlock(i) == Block)
      PN->setIncomingBlock(i, ReplaceBy);
}

static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
  APInt SMax =
      APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMax) &&
         SE.getUnsignedRange(S).contains(SMax);
}

static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
  APInt SMin =
      APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMin) &&
         SE.getUnsignedRange(S).contains(SMin);
}

Optional<LoopStructure>
LoopStructure::parseLoopStructure(ScalarEvolution &SE,
                                  BranchProbabilityInfo &BPI, Loop &L,
                                  const char *&FailureReason) {
  if (!L.isLoopSimplifyForm()) {
    FailureReason = "loop not in LoopSimplify form";
    return None;
  }

  BasicBlock *Latch = L.getLoopLatch();
  assert(Latch && "Simplified loops only have one latch!");

  if (Latch->getTerminator()->getMetadata(ClonedLoopTag)) {
    FailureReason = "loop has already been cloned";
    return None;
  }

  if (!L.isLoopExiting(Latch)) {
    FailureReason = "latch is not an exiting block";
    return None;
  }

  BasicBlock *Header = L.getHeader();
  BasicBlock *Preheader = L.getLoopPreheader();
  if (!Preheader) {
    FailureReason = "no preheader";
    return None;
  }

  BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    FailureReason = "latch terminator not conditional branch";
    return None;
  }

  unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;

  BranchProbability ExitProbability =
      BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);

  if (!SkipProfitabilityChecks &&
      ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
    FailureReason = "short running loop, not profitable";
    return None;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
  if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
    FailureReason = "latch terminator branch not conditional on integral icmp";
    return None;
  }

  const SCEV *LatchCount = SE.getExitCount(&L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchCount)) {
    FailureReason = "could not compute latch count";
    return None;
  }

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LeftValue = ICI->getOperand(0);
  const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
  IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());

  Value *RightValue = ICI->getOperand(1);
  const SCEV *RightSCEV = SE.getSCEV(RightValue);

  // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
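  // e.g. `icmp slt i32 %len, %iv.next` is treated as the equivalent
  // `icmp sgt i32 %iv.next, %len`, so that the add recurrence ends up on the
  // left hand side of the comparison.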
  if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
    if (isa<SCEVAddRecExpr>(RightSCEV)) {
      std::swap(LeftSCEV, RightSCEV);
      std::swap(LeftValue, RightValue);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    } else {
      FailureReason = "no add recurrences in the icmp";
      return None;
    }
  }

  auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
    if (AR->getNoWrapFlags(SCEV::FlagNSW))
      return true;

    IntegerType *Ty = cast<IntegerType>(AR->getType());
    IntegerType *WideTy =
        IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);

    const SCEVAddRecExpr *ExtendAfterOp =
        dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
    if (ExtendAfterOp) {
      const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
      const SCEV *ExtendedStep =
          SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);

      bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
                          ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;

      if (NoSignedWrap)
        return true;
    }

    // We may have proved this when computing the sign extension above.
    return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
  };

  auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
    if (!AR->isAffine())
      return false;

    // Currently we only work with induction variables that have been proved
    // to not wrap.  This restriction can potentially be lifted in the future.

    if (!HasNoSignedWrap(AR))
      return false;

    if (const SCEVConstant *StepExpr =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
      ConstantInt *StepCI = StepExpr->getValue();
      if (StepCI->isOne() || StepCI->isMinusOne()) {
        IsIncreasing = StepCI->isOne();
        return true;
      }
    }

    return false;
  };

  // `ICI` is interpreted as taking the backedge if the *next* value of the
  // induction variable satisfies some constraint.

  const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
  bool IsIncreasing = false;
  if (!IsInductionVar(IndVarNext, IsIncreasing)) {
    FailureReason = "LHS in icmp not induction variable";
    return None;
  }

  ConstantInt *One = ConstantInt::get(IndVarTy, 1);
  // TODO: generalize the predicates here to also match their unsigned variants.
  if (IsIncreasing) {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp slt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMax(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an slt and not an sle.
        FailureReason = "limit may overflow when coercing sle to slt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateAdd(RightValue, One);
    }

  } else {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp sgt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMin(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an sgt and not an sge.
        FailureReason = "limit may overflow when coercing sge to sgt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateSub(RightValue, One);
    }
  }

  const SCEV *StartNext = IndVarNext->getStart();
  const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
  const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);

  BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);

  assert(SE.getLoopDisposition(LatchCount, &L) ==
             ScalarEvolution::LoopInvariant &&
         "loop variant exit count doesn't make sense!");

  assert(!L.contains(LatchExit) && "expected an exit block!");
  const DataLayout &DL = Preheader->getModule()->getDataLayout();
  Value *IndVarStartV =
      SCEVExpander(SE, DL, "irce")
          .expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator());
  IndVarStartV->setName("indvar.start");

  LoopStructure Result;

  Result.Tag = "main";
  Result.Header = Header;
  Result.Latch = Latch;
  Result.LatchBr = LatchBr;
  Result.LatchExit = LatchExit;
  Result.LatchBrExitIdx = LatchBrExitIdx;
  Result.IndVarStart = IndVarStartV;
  Result.IndVarNext = LeftValue;
  Result.IndVarIncreasing = IsIncreasing;
  Result.LoopExitAt = RightValue;

  FailureReason = nullptr;

  return Result;
}

Optional<LoopConstrainer::SubRanges>
LoopConstrainer::calculateSubRanges() const {
  IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());

  if (Range.getType() != Ty)
    return None;

  LoopConstrainer::SubRanges Result;

  // I think we can be more aggressive here and make this nuw / nsw if the
  // addition that feeds into the icmp for the latch's terminating branch is
  // nuw / nsw.  In any case, a wrapping 2's complement addition is safe.
  ConstantInt *One = ConstantInt::get(Ty, 1);
  const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
  const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
  // range of values the induction variable takes.

  const SCEV *Smallest = nullptr, *Greatest = nullptr;

  if (Increasing) {
    Smallest = Start;
    Greatest = End;
  } else {
    // These two computations may sign-overflow.  Here is why that is okay:
    //
    // We know that the induction variable does not sign-overflow on any
    // iteration except the last one, and it starts at `Start` and ends at
    // `End`, decrementing by one every time.
    //
    //  * if `Smallest` sign-overflows we know `End` is `INT_SMAX`.  Since the
    //    induction variable is decreasing we know that the smallest value
    //    the loop body is actually executed with is `INT_SMIN` == `Smallest`.
    //
    //  * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`.  In
    //    that case, `Clamp` will always return `Smallest` and
    //    [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
    //    will be an empty range.  Returning an empty range is always safe.
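    //
    // As a concrete example, for a decreasing i8 induction variable with
    // Start = 5 and End = -3 (so the loop body runs with the values
    // 5, 4, ..., -2), we get Smallest = End + 1 = -2 and
    // Greatest = Start + 1 = 6, i.e. the value range [-2, 6).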
    //

    Smallest = SE.getAddExpr(End, SE.getSCEV(One));
    Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
  }

  auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
    return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
                                const char *Tag) const {
  for (BasicBlock *BB : OriginalLoop.getBlocks()) {
    BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
    Result.Blocks.push_back(Clone);
    Result.Map[BB] = Clone;
  }

  auto GetClonedValue = [&Result](Value *V) {
    assert(V && "null values not in domain!");
    auto It = Result.Map.find(V);
    if (It == Result.Map.end())
      return V;
    return static_cast<Value *>(It->second);
  };

  auto *ClonedLatch =
      cast<BasicBlock>(GetClonedValue(OriginalLoop.getLoopLatch()));
  ClonedLatch->getTerminator()->setMetadata(ClonedLoopTag,
                                            MDNode::get(Ctx, {}));

  Result.Structure = MainLoopStructure.map(GetClonedValue);
  Result.Structure.Tag = Tag;

  for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
    BasicBlock *ClonedBB = Result.Blocks[i];
    BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];

    assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");

    for (Instruction &I : *ClonedBB)
      RemapInstruction(&I, Result.Map,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Exit blocks will now have one more predecessor and their PHI nodes need
    // to be edited to reflect that.  No phi nodes need to be introduced because
    // the loop is in LCSSA.

    for (auto *SBB : successors(OriginalBB)) {
      if (OriginalLoop.contains(SBB))
        continue; // not an exit block

      for (Instruction &I : *SBB) {
        auto *PN = dyn_cast<PHINode>(&I);
        if (!PN)
          break;

        Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
        PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
      }
    }
  }
}

LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
    const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
    BasicBlock *ContinuationBlock) const {

  // We start with a loop with a single latch:
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      |
  //    |                    |
  //    +--------+-----------+
  //             |      ---------------\
  //             |     /               |
  //    +--------v----v------+         |
  //    |                    |         |
  //    |      header        |         |
  //    |                    |         |
  //    +--------------------+         |
  //                                   |
  //            .....                  |
  //                                   |
  //    +--------------------+         |
  //    |                    |         |
  //    |      latch         >---------/
  //    |                    |
  //    +-------v------------+
  //            |
  //            |
  //            |   +--------------------+
  //            |   |                    |
  //            +--->   original exit    |
  //                |                    |
  //                +--------------------+
  //
  // We change the control flow to look like
  //
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      >-------------------------+
  //    |                    |                         |
  //    +--------v-----------+                         |
  //             |    /-------------+                  |
  //             |   /              |                  |
  //    +--------v--v--------+      |                  |
  //    |                    |      |                  |
  //    |      header        |      |   +--------+     |
  //    |                    |      |   |        |     |
  //    +--------------------+      |   |  +-----v-----v-----------+
  //                                |   |  |                       |
  //                                |   |  |     .pseudo.exit      |
  //                                |   |  |                       |
  //                                |   |  +-----------v-----------+
  //                                |   |              |
  //            .....               |   |              |
  //                                |   |   +--------v-------------+
  //    +--------------------+      |   |   |                      |
  //    |                    |      |   |   |  ContinuationBlock   |
  //    |      latch         >------+   |   |                      |
  //    |                    |          |   +----------------------+
  //    +---------v----------+          |
  //              |                     |
  //              |                     |
  //              |   +---------------^-----+
  //              |   |                     |
  //              +----->  .exit.selector   |
  //                  |                     |
  //                  +----------v----------+
  //                             |
  //    +--------------------+   |
  //    |                    |   |
  //    |   original exit    <----+
  //    |                    |
  //    +--------------------+
  //

  RewrittenRangeInfo RRI;

  BasicBlock *BBInsertLocation = LS.Latch->getNextNode();
  RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
                                        &F, BBInsertLocation);
  RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
                                      BBInsertLocation);

  BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator());
  bool Increasing = LS.IndVarIncreasing;

  IRBuilder<> B(PreheaderJump);

  // EnterLoopCond - is it okay to start executing this `LS'?
  Value *EnterLoopCond = Increasing
                             ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
                             : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);

  B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
  PreheaderJump->eraseFromParent();

  LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
  B.SetInsertPoint(LS.LatchBr);
  Value *TakeBackedgeLoopCond =
      Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
                 : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
  Value *CondForBranch = LS.LatchBrExitIdx == 1
                             ? TakeBackedgeLoopCond
                             : B.CreateNot(TakeBackedgeLoopCond);

  LS.LatchBr->setCondition(CondForBranch);

  B.SetInsertPoint(RRI.ExitSelector);

  // IterationsLeft - are there any more iterations left, given the original
  // upper bound on the induction variable?  If not, we branch to the "real"
  // exit.
  Value *IterationsLeft = Increasing
                              ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
                              : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
  B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);

  BranchInst *BranchToContinuation =
      BranchInst::Create(ContinuationBlock, RRI.PseudoExit);

  // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
  // each of the PHI nodes in the loop header.  This feeds into the initial
  // value of the same PHI nodes if/when we continue execution.
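  // Note: the header PHI nodes are visited here in block order, and
  // rewriteIncomingValuesForPHIs later walks the header PHIs in the same
  // order, which is what makes the positional PHIValuesAtPseudoExit /
  // PHIIndex bookkeeping line up.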
  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
                                      BranchToContinuation);

    NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
    NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
                        RRI.ExitSelector);
    RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
  }

  RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
                                  BranchToContinuation);
  RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
  RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);

  // The latch exit now has a branch from `RRI.ExitSelector' instead of
  // `LS.Latch'.  The PHI nodes need to be updated to reflect that.
  for (Instruction &I : *LS.LatchExit) {
    if (PHINode *PN = dyn_cast<PHINode>(&I))
      replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
    else
      break;
  }

  return RRI;
}

void LoopConstrainer::rewriteIncomingValuesForPHIs(
    LoopStructure &LS, BasicBlock *ContinuationBlock,
    const LoopConstrainer::RewrittenRangeInfo &RRI) const {

  unsigned PHIIndex = 0;
  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      if (PN->getIncomingBlock(i) == ContinuationBlock)
        PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
  }

  LS.IndVarStart = RRI.IndVarEnd;
}

BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
                                             BasicBlock *OldPreheader,
                                             const char *Tag) const {

  BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
  BranchInst::Create(LS.Header, Preheader);

  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      replacePHIBlock(PN, OldPreheader, Preheader);
  }

  return Preheader;
}

void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
  Loop *ParentLoop = OriginalLoop.getParentLoop();
  if (!ParentLoop)
    return;

  for (BasicBlock *BB : BBs)
    ParentLoop->addBasicBlockToLoop(BB, LI);
}

Loop *LoopConstrainer::createClonedLoopStructure(Loop *Original, Loop *Parent,
                                                 ValueToValueMapTy &VM) {
  Loop &New = LPM.addLoop(Parent);

  // Add all of the blocks in Original to the new loop.
  for (auto *BB : Original->blocks())
    if (LI.getLoopFor(BB) == Original)
      New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), LI);

  // Add all of the subloops to the new loop.
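  // (This is done recursively, so the cloned loop nest ends up mirroring the
  // original nest, with each cloned loop registered via LPM.addLoop.)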
  for (Loop *SubLoop : *Original)
    createClonedLoopStructure(SubLoop, &New, VM);

  return &New;
}

bool LoopConstrainer::run() {
  BasicBlock *Preheader = nullptr;
  LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
  Preheader = OriginalLoop.getLoopPreheader();
  assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
         "preconditions!");

  OriginalPreheader = Preheader;
  MainLoopPreheader = Preheader;

  Optional<SubRanges> MaybeSR = calculateSubRanges();
  if (!MaybeSR.hasValue()) {
    DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  SubRanges SR = MaybeSR.getValue();
  bool Increasing = MainLoopStructure.IndVarIncreasing;
  IntegerType *IVTy =
      cast<IntegerType>(MainLoopStructure.IndVarNext->getType());

  SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
  Instruction *InsertPt = OriginalPreheader->getTerminator();

  // It would have been better to make `PreLoop' and `PostLoop'
  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
  // constructor.
  ClonedLoop PreLoop, PostLoop;
  bool NeedsPreLoop =
      Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
  bool NeedsPostLoop =
      Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();

  Value *ExitPreLoopAt = nullptr;
  Value *ExitMainLoopAt = nullptr;
  const SCEVConstant *MinusOneS =
      cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));

  if (NeedsPreLoop) {
    const SCEV *ExitPreLoopAtSCEV = nullptr;

    if (Increasing)
      ExitPreLoopAtSCEV = *SR.LowLimit;
    else {
      if (CanBeSMin(SE, *SR.HighLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
                     << "\n");
        return false;
      }
      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
    }

    ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
    ExitPreLoopAt->setName("exit.preloop.at");
  }

  if (NeedsPostLoop) {
    const SCEV *ExitMainLoopAtSCEV = nullptr;

    if (Increasing)
      ExitMainLoopAtSCEV = *SR.HighLimit;
    else {
      if (CanBeSMin(SE, *SR.LowLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
                     << "\n");
        return false;
      }
      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
    }

    ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
    ExitMainLoopAt->setName("exit.mainloop.at");
  }

  // We clone these ahead of time so that we don't have to deal with changing
  // and temporarily invalid IR as we transform the loops.
  if (NeedsPreLoop)
    cloneLoop(PreLoop, "preloop");
  if (NeedsPostLoop)
    cloneLoop(PostLoop, "postloop");

  RewrittenRangeInfo PreLoopRRI;

  if (NeedsPreLoop) {
    Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
                                                  PreLoop.Structure.Header);

    MainLoopPreheader =
        createPreheader(MainLoopStructure, Preheader, "mainloop");
    PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
                                         ExitPreLoopAt, MainLoopPreheader);
    rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
                                 PreLoopRRI);
  }

  BasicBlock *PostLoopPreheader = nullptr;
  RewrittenRangeInfo PostLoopRRI;

  if (NeedsPostLoop) {
    PostLoopPreheader =
        createPreheader(PostLoop.Structure, Preheader, "postloop");
    PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
                                          ExitMainLoopAt, PostLoopPreheader);
    rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
                                 PostLoopRRI);
  }

  BasicBlock *NewMainLoopPreheader =
      MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
  BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
                             PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
                             PostLoopRRI.ExitSelector, NewMainLoopPreheader};

  // Some of the above may be nullptr, filter them out before passing to
  // addToParentLoopIfNeeded.
  auto NewBlocksEnd =
      std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);

  addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));

  DT.recalculate(F);

  if (!PreLoop.Blocks.empty()) {
    auto *L = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PreLoop.Map);
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
  }

  if (!PostLoop.Blocks.empty()) {
    auto *L = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PostLoop.Map);
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
  }

  formLCSSARecursively(OriginalLoop, DT, &LI, &SE);
  simplifyLoop(&OriginalLoop, &DT, &LI, &SE, nullptr, true);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getOffset() and "D" is getScale()).  We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - NA".
  // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
  // can be generalized as needed.
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // The inequality is satisfied by -M <= IndVar < (L - M) [^1].  All additions
  // and subtractions are twos-complement wrapping and comparisons are signed.
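  //
  // For a concrete instance: with A = 0, B = 1 (so IndVar == I), C = 2, D = 1
  // and L = 10, we get M = 2, and the check "0 <= 2 + IndVar < 10" holds
  // exactly for IndVar in [-2, 8), i.e. [-M, L - M).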
  //
  // Proof:
  //
  //   If there exists IndVar such that -M <= IndVar < (L - M) then it follows
  //   that -M <= (-M + L) [== Eq. 1].  Since L >= 0, if (-M + L) sign-overflows
  //   then (-M + L) < (-M).  Hence by [Eq. 1], (-M + L) could not have
  //   overflown.
  //
  //   This means IndVar = t + (-M) for t in [0, L).  Hence (IndVar + M) = t.
  //   Hence 0 <= (IndVar + M) < L

  // [^1]: Note that the solution does _not_ apply if L < 0; consider values
  // M = 127, IndVar = 126 and L = -2 in an i8 world.

  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;

  const SCEV *C = getOffset();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
  if (D != B)
    return None;

  ConstantInt *ConstD = D->getValue();
  if (!(ConstD->isMinusOne() || ConstD->isOne()))
    return None;

  const SCEV *M = SE.getMinusSCEV(C, A);

  const SCEV *Begin = SE.getNegativeSCEV(M);
  const SCEV *UpperLimit = nullptr;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
  if (Value *V = getLength()) {
    UpperLimit = SE.getSCEV(V);
  } else {
    assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
    UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  }

  const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectRange(ScalarEvolution &SE,
               const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2) {
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  return InductiveRangeCheck::Range(NewBegin, NewEnd);
}

bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  bool Increasing = LS.IndVarIncreasing;
  const SCEV *MinusOne =
      SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
  const SCEVAddRecExpr *IndVar =
      cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LPM,
                     LS, SE, DT, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}