//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges.  It does that in a way such that the loop running in
// the middle range provably does not need range checks.  As an example, it
// will convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
                                          cl::Hidden, cl::init(10));

static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
                                             cl::Hidden, cl::init(false));

#define DEBUG_TYPE "irce"
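
// This pass is registered under the name "irce" (see DEBUG_TYPE above and the
// INITIALIZE_PASS macros below).  With the legacy pass manager it can
// therefore typically be exercised with an invocation along the lines of
//
//   opt -irce -irce-print-changed-loops -S input.ll
//
// (illustrative usage only, not taken from this file; the flags shown are the
// cl::opts defined above).
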
namespace {

/// An inductive range check is a conditional branch in a loop with
///
///  1. a very cold successor (i.e. the branch jumps to that successor very
///     rarely)
///
///  and
///
///  2. a condition that is provably true for some contiguous range of values
///     taken by the containing loop's induction variable.
///
class InductiveRangeCheck {
  // Classifies a range check
  enum RangeCheckKind : unsigned {
    // Range check of the form "0 <= I".
    RANGE_CHECK_LOWER = 1,

    // Range check of the form "I < L" where L is known positive.
    RANGE_CHECK_UPPER = 2,

    // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
    // conditions.
    RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,

    // Unrecognized range check condition.
    RANGE_CHECK_UNKNOWN = (unsigned)-1
  };

  static StringRef rangeCheckKindToStr(RangeCheckKind);

  const SCEV *Offset = nullptr;
  const SCEV *Scale = nullptr;
  Value *Length = nullptr;
  Use *CheckUse = nullptr;
  RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;

  static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                            ScalarEvolution &SE, Value *&Index,
                                            Value *&Length);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

public:
  const SCEV *getOffset() const { return Offset; }
  const SCEV *getScale() const { return Scale; }
  Value *getLength() const { return Length; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n";
    OS << " Offset: ";
    Offset->print(OS);
    OS << " Scale: ";
    Scale->print(OS);
    OS << " Length: ";
    if (Length)
      Length->print(OS);
    else
      OS << "(null)";
    OS << "\n CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    print(dbgs());
  }
#endif

  Use *getCheckUse() const { return CheckUse; }

  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()).  If
  /// R.getEnd() sle R.getBegin(), then R denotes the empty range.

  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
  };

  /// This is the value the condition of the branch needs to evaluate to for
  /// the branch to take the hot successor (see (1) above).
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away.  The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                            const SCEVAddRecExpr *IndVar) const;

  /// Parse out a set of inductive range checks from \p BI and append them to
  /// \p Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that aren't inductive
  /// range checks, and hence don't end up in \p Checks.
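  ///
  /// For example, if \p BI branches on `A & B` and only `A` parses as an
  /// inductive range check, then only `A` is appended to \p Checks.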
  static void
  extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE,
                               BranchProbabilityInfo &BPI,
                               SmallVectorImpl<InductiveRangeCheck> &Checks);
};

class InductiveRangeCheckElimination : public LoopPass {
public:
  static char ID;
  InductiveRangeCheckElimination() : LoopPass(ID) {
    initializeInductiveRangeCheckEliminationPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
};

char InductiveRangeCheckElimination::ID = 0;
}

INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
                      "Inductive range check elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce",
                    "Inductive range check elimination", false, false)

StringRef InductiveRangeCheck::rangeCheckKindToStr(
    InductiveRangeCheck::RangeCheckKind RCK) {
  switch (RCK) {
  case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
    return "RANGE_CHECK_UNKNOWN";

  case InductiveRangeCheck::RANGE_CHECK_UPPER:
    return "RANGE_CHECK_UPPER";

  case InductiveRangeCheck::RANGE_CHECK_LOWER:
    return "RANGE_CHECK_LOWER";

  case InductiveRangeCheck::RANGE_CHECK_BOTH:
    return "RANGE_CHECK_BOTH";
  }

  llvm_unreachable("unknown range check type!");
}

/// Parse a single ICmp instruction, `ICI`, into a range check.  If `ICI` cannot
/// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set
/// `Index` and `Length` to `nullptr`.  Otherwise set `Index` to the value being
/// range checked, and set `Length` to the upper limit `Index` is being range
/// checked with if (and only if) the range check type is stronger or equal to
/// RANGE_CHECK_UPPER.
///
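/// For example, `I > -1` (i.e. `0 <= I`) parses as RANGE_CHECK_LOWER; `I < L`
/// with `L` loop-invariant and known non-negative parses as RANGE_CHECK_UPPER;
/// and an unsigned `I u< L` with such an `L` implies both bounds at once and
/// parses as RANGE_CHECK_BOTH.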
InductiveRangeCheck::RangeCheckKind
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                         ScalarEvolution &SE, Value *&Index,
                                         Value *&Length) {

  auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
    const SCEV *S = SE.getSCEV(V);
    if (isa<SCEVCouldNotCompute>(S))
      return false;

    return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
           SE.isKnownNonNegative(S);
  };

  using namespace llvm::PatternMatch;

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (Pred) {
  default:
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLT:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }

    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_UPPER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_ULT:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_UGT:
    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_BOTH;
    }
    return RANGE_CHECK_UNKNOWN;
  }

  llvm_unreachable("default clause returns!");
}

void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  using namespace llvm::PatternMatch;

  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  if (match(Condition, m_And(m_Value(), m_Value()))) {
    SmallVector<InductiveRangeCheck, 8> SubChecks;
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
                               SubChecks, Visited);
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
                               SubChecks, Visited);

    if (SubChecks.size() == 2) {
      // Handle a special case where we know how to merge two checks separately
      // checking the upper and lower bounds into a full range check.
      const auto &RChkA = SubChecks[0];
      const auto &RChkB = SubChecks[1];
      if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
          RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {

        // If RChkA.Kind == RChkB.Kind then we just found two identical checks.
        // But if one of them is a RANGE_CHECK_LOWER and the other is a
        // RANGE_CHECK_UPPER (only possibility if they're different) then
        // together they form a RANGE_CHECK_BOTH.
        SubChecks[0].Kind =
            (InductiveRangeCheck::RangeCheckKind)(RChkA.Kind | RChkB.Kind);
        SubChecks[0].Length = RChkA.Length ? RChkA.Length : RChkB.Length;
        SubChecks[0].CheckUse = &ConditionUse;

        // We updated one of the checks in place, now erase the other.
        SubChecks.pop_back();
      }
    }

    Checks.insert(Checks.end(), SubChecks.begin(), SubChecks.end());
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  Value *Length = nullptr, *Index;
  auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length);
  if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
    return;

  const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
  bool IsAffineIndex =
      IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();

  if (!IsAffineIndex)
    return;

  InductiveRangeCheck IRC;
  IRC.Length = Length;
  IRC.Offset = IndexAddRec->getStart();
  IRC.Scale = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  IRC.Kind = RCKind;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
    SmallVectorImpl<InductiveRangeCheck> &Checks) {

  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  BranchProbability LikelyTaken(15, 16);

  if (!SkipProfitabilityChecks &&
      BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken)
    return;

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

namespace {

// Keeps track of the structure of a loop.  This is similar to llvm::Loop,
// except that it is more lightweight and can track the state of a loop through
// changing and potentially invalid IR.  This structure also formalizes the
// kinds of loops we can deal with -- ones that have a single latch that is
// also an exiting block *and* have a canonical induction variable.
struct LoopStructure {
  const char *Tag;

  BasicBlock *Header;
  BasicBlock *Latch;

  // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
  // successor is `LatchExit', the exit block of the loop.
  BranchInst *LatchBr;
  BasicBlock *LatchExit;
  unsigned LatchBrExitIdx;

  Value *IndVarNext;
  Value *IndVarStart;
  Value *LoopExitAt;
  bool IndVarIncreasing;

  LoopStructure()
      : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
        LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
        IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}

  template <typename M> LoopStructure map(M Map) const {
    LoopStructure Result;
    Result.Tag = Tag;
    Result.Header = cast<BasicBlock>(Map(Header));
    Result.Latch = cast<BasicBlock>(Map(Latch));
    Result.LatchBr = cast<BranchInst>(Map(LatchBr));
    Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
    Result.LatchBrExitIdx = LatchBrExitIdx;
    Result.IndVarNext = Map(IndVarNext);
    Result.IndVarStart = Map(IndVarStart);
    Result.LoopExitAt = Map(LoopExitAt);
    Result.IndVarIncreasing = IndVarIncreasing;
    return Result;
  }

  static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
                                                    BranchProbabilityInfo &BPI,
                                                    Loop &,
                                                    const char *&);
};

/// This class is used to constrain loops to run within a given iteration
/// space.  The algorithm this class implements is given a Loop and a range
/// [Begin, End).
/// The algorithm then tries to break a "main loop" out of the
/// loop it is given in a way that the "main loop" runs with the induction
/// variable in a subset of [Begin, End).  The algorithm emits appropriate pre
/// and post loops to run any remaining iterations.  The pre loop runs any
/// iterations in which the induction variable is < Begin, and the post loop
/// runs any iterations in which the induction variable is >= End.
///
class LoopConstrainer {
  // The representation of a clone of the original loop we started out with.
  struct ClonedLoop {
    // The cloned blocks
    std::vector<BasicBlock *> Blocks;

    // `Map` maps values in the clonee into values in the cloned version
    ValueToValueMapTy Map;

    // An instance of `LoopStructure` for the cloned loop
    LoopStructure Structure;
  };

  // Result of rewriting the range of a loop.  See changeIterationSpaceEnd for
  // more details on what these fields mean.
  struct RewrittenRangeInfo {
    BasicBlock *PseudoExit;
    BasicBlock *ExitSelector;
    std::vector<PHINode *> PHIValuesAtPseudoExit;
    PHINode *IndVarEnd;

    RewrittenRangeInfo()
        : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
  };

  // Calculated subranges we restrict the iteration space of the main loop to.
  // See the implementation of `calculateSubRanges' for more details on how
  // these fields are computed.  `LowLimit` is None if there is no restriction
  // on the low end of the restricted iteration space of the main loop.
  // `HighLimit` is None if there is no restriction on the high end of the
  // restricted iteration space of the main loop.

  struct SubRanges {
    Optional<const SCEV *> LowLimit;
    Optional<const SCEV *> HighLimit;
  };

  // A utility function that does a `replaceUsesOfWith' on the incoming block
  // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
  // incoming block list with `ReplaceBy'.
  static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
                              BasicBlock *ReplaceBy);

  // Compute a safe set of limits for the main loop to run in -- effectively
  // the intersection of `Range' and the iteration space of the original loop.
  // Return None if unable to compute the set of subranges.
  //
  Optional<SubRanges> calculateSubRanges() const;

  // Clone `OriginalLoop' and return the result in CLResult.  The IR after
  // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
  // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
  // but there is no such edge.
  //
  void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;

  // Rewrite the iteration space of the loop denoted by (LS, Preheader).  The
  // iteration space of the rewritten loop ends at ExitLoopAt.  The start of
  // the iteration space is not changed.  `ExitLoopAt' is assumed to be slt
  // `OriginalHeaderCount'.
  //
  // If there are iterations left to execute, control is made to jump to
  // `ContinuationBlock', otherwise they take the normal loop exit.  The
  // returned `RewrittenRangeInfo' object is populated as follows:
  //
  //  .PseudoExit is a basic block that unconditionally branches to
  //  `ContinuationBlock'.
  //
  //  .ExitSelector is a basic block that decides, on exit from the loop,
  //  whether to branch to the "true" exit or to `PseudoExit'.
  //
  //  .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the
  //  value for each PHINode in the loop header on taking the pseudo exit.
  //
  // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
  // preheader because it is made to branch to the loop header only
  // conditionally.
  //
  RewrittenRangeInfo
  changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
                          Value *ExitLoopAt,
                          BasicBlock *ContinuationBlock) const;

  // The loop denoted by `LS' has `OldPreheader' as its preheader.  This
  // function creates a new preheader for `LS' and returns it.
  //
  BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
                              const char *Tag) const;

  // `ContinuationBlockAndPreheader' was the continuation block for some call
  // to `changeIterationSpaceEnd' and is the preheader to the loop denoted by
  // `LS'.  This function rewrites the PHI nodes in `LS.Header' to start with
  // the correct value.
  void rewriteIncomingValuesForPHIs(
      LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
      const LoopConstrainer::RewrittenRangeInfo &RRI) const;

  // Even though we do not preserve any passes at this time, we at least need
  // to keep the parent loop structure consistent.  The `LPPassManager' seems
  // to verify this after running a loop pass.  This function adds the list of
  // blocks denoted by BBs to this loop's parent loop if required.
  void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);

  // Some global state.
  Function &F;
  LLVMContext &Ctx;
  ScalarEvolution &SE;

  // Information about the original loop we started out with.
  Loop &OriginalLoop;
  LoopInfo &OriginalLoopInfo;
  const SCEV *LatchTakenCount;
  BasicBlock *OriginalPreheader;

  // The preheader of the main loop.  This may or may not be different from
  // `OriginalPreheader'.
  BasicBlock *MainLoopPreheader;

  // The range we need to run the main loop in.
  InductiveRangeCheck::Range Range;

  // The structure of the main loop (see comment at the beginning of this class
  // for a definition)
  LoopStructure MainLoopStructure;

public:
  LoopConstrainer(Loop &L, LoopInfo &LI, const LoopStructure &LS,
                  ScalarEvolution &SE, InductiveRangeCheck::Range R)
      : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
        SE(SE), OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr),
        OriginalPreheader(nullptr), MainLoopPreheader(nullptr), Range(R),
        MainLoopStructure(LS) {}

  // Entry point for the algorithm.  Returns true on success.
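  // At a high level, run() computes the safe subranges (calculateSubRanges),
  // clones the pre- and post-loop when they are needed (cloneLoop), truncates
  // the iteration space of the pre loop and the main loop
  // (changeIterationSpaceEnd), and wires the resulting loops together through
  // the pseudo-exit blocks.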
  bool run();
};

}

void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
                                      BasicBlock *ReplaceBy) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingBlock(i) == Block)
      PN->setIncomingBlock(i, ReplaceBy);
}

static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
  APInt SMax =
      APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMax) &&
         SE.getUnsignedRange(S).contains(SMax);
}

static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
  APInt SMin =
      APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMin) &&
         SE.getUnsignedRange(S).contains(SMin);
}

Optional<LoopStructure>
LoopStructure::parseLoopStructure(ScalarEvolution &SE,
                                  BranchProbabilityInfo &BPI, Loop &L,
                                  const char *&FailureReason) {
  assert(L.isLoopSimplifyForm() && "should follow from addRequired<>");

  BasicBlock *Latch = L.getLoopLatch();
  if (!L.isLoopExiting(Latch)) {
    FailureReason = "no loop latch";
    return None;
  }

  BasicBlock *Header = L.getHeader();
  BasicBlock *Preheader = L.getLoopPreheader();
  if (!Preheader) {
    FailureReason = "no preheader";
    return None;
  }

  BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    FailureReason = "latch terminator not conditional branch";
    return None;
  }

  unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;

  BranchProbability ExitProbability =
      BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);

  if (!SkipProfitabilityChecks &&
      ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
    FailureReason = "short running loop, not profitable";
    return None;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
  if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
    FailureReason = "latch terminator branch not conditional on integral icmp";
    return None;
  }

  const SCEV *LatchCount = SE.getExitCount(&L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchCount)) {
    FailureReason = "could not compute latch count";
    return None;
  }

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LeftValue = ICI->getOperand(0);
  const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
  IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());

  Value *RightValue = ICI->getOperand(1);
  const SCEV *RightSCEV = SE.getSCEV(RightValue);

  // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
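  // That is, if the add recurrence appears on the right hand side, we swap the
  // operands and the predicate so that the induction variable side is always
  // on the left.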
  if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
    if (isa<SCEVAddRecExpr>(RightSCEV)) {
      std::swap(LeftSCEV, RightSCEV);
      std::swap(LeftValue, RightValue);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    } else {
      FailureReason = "no add recurrences in the icmp";
      return None;
    }
  }

  auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
    if (AR->getNoWrapFlags(SCEV::FlagNSW))
      return true;

    IntegerType *Ty = cast<IntegerType>(AR->getType());
    IntegerType *WideTy =
        IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);

    const SCEVAddRecExpr *ExtendAfterOp =
        dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
    if (ExtendAfterOp) {
      const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
      const SCEV *ExtendedStep =
          SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);

      bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
                          ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;

      if (NoSignedWrap)
        return true;
    }

    // We may have proved this when computing the sign extension above.
    return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
  };

  auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
    if (!AR->isAffine())
      return false;

    // Currently we only work with induction variables that have been proved
    // to not wrap.  This restriction can potentially be lifted in the future.

    if (!HasNoSignedWrap(AR))
      return false;

    if (const SCEVConstant *StepExpr =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
      ConstantInt *StepCI = StepExpr->getValue();
      if (StepCI->isOne() || StepCI->isMinusOne()) {
        IsIncreasing = StepCI->isOne();
        return true;
      }
    }

    return false;
  };

  // `ICI` is interpreted as taking the backedge if the *next* value of the
  // induction variable satisfies some constraint.

  const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
  bool IsIncreasing = false;
  if (!IsInductionVar(IndVarNext, IsIncreasing)) {
    FailureReason = "LHS in icmp not induction variable";
    return None;
  }

  ConstantInt *One = ConstantInt::get(IndVarTy, 1);
  // TODO: generalize the predicates here to also match their unsigned
  // variants.
  if (IsIncreasing) {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp slt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMax(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an slt and not an sle.
        FailureReason = "limit may overflow when coercing sle to slt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateAdd(RightValue, One);
    }

  } else {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp sgt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMin(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an sgt and not an sge.
        FailureReason = "limit may overflow when coercing sge to sgt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateSub(RightValue, One);
    }
  }

  const SCEV *StartNext = IndVarNext->getStart();
  const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
  const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);

  BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);

  assert(SE.getLoopDisposition(LatchCount, &L) ==
             ScalarEvolution::LoopInvariant &&
         "loop variant exit count doesn't make sense!");

  assert(!L.contains(LatchExit) && "expected an exit block!");
  const DataLayout &DL = Preheader->getModule()->getDataLayout();
  Value *IndVarStartV =
      SCEVExpander(SE, DL, "irce")
          .expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator());
  IndVarStartV->setName("indvar.start");

  LoopStructure Result;

  Result.Tag = "main";
  Result.Header = Header;
  Result.Latch = Latch;
  Result.LatchBr = LatchBr;
  Result.LatchExit = LatchExit;
  Result.LatchBrExitIdx = LatchBrExitIdx;
  Result.IndVarStart = IndVarStartV;
  Result.IndVarNext = LeftValue;
  Result.IndVarIncreasing = IsIncreasing;
  Result.LoopExitAt = RightValue;

  FailureReason = nullptr;

  return Result;
}

Optional<LoopConstrainer::SubRanges>
LoopConstrainer::calculateSubRanges() const {
  IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());

  if (Range.getType() != Ty)
    return None;

  LoopConstrainer::SubRanges Result;

  // I think we can be more aggressive here and make this nuw / nsw if the
  // addition that feeds into the icmp for the latch's terminating branch is
  // nuw / nsw.  In any case, a wrapping 2's complement addition is safe.
  ConstantInt *One = ConstantInt::get(Ty, 1);
  const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
  const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
  // range of values the induction variable takes.

  const SCEV *Smallest = nullptr, *Greatest = nullptr;

  if (Increasing) {
    Smallest = Start;
    Greatest = End;
  } else {
    // These two computations may sign-overflow.  Here is why that is okay:
    //
    // We know that the induction variable does not sign-overflow on any
    // iteration except the last one, and it starts at `Start` and ends at
    // `End`, decrementing by one every time.
    //
    //  * if `Smallest` sign-overflows we know `End` is `INT_SMAX`.  Since the
    //    induction variable is decreasing we know that the smallest value
    //    the loop body is actually executed with is `INT_SMIN` == `Smallest`.
    //
    //  * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`.  In
    //    that case, `Clamp` will always return `Smallest` and
    //    [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
    //    will be an empty range.  Returning an empty range is always safe.
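    //
    // For example, for a decreasing i8 induction variable with Start = 5 and
    // End = -3 (no overflow involved), the formulas below give Smallest = -2
    // and Greatest = 6, i.e. the induction variable takes its values in
    // [-2, 6).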
    //

    Smallest = SE.getAddExpr(End, SE.getSCEV(One));
    Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
  }

  auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
    return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
                                const char *Tag) const {
  for (BasicBlock *BB : OriginalLoop.getBlocks()) {
    BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
    Result.Blocks.push_back(Clone);
    Result.Map[BB] = Clone;
  }

  auto GetClonedValue = [&Result](Value *V) {
    assert(V && "null values not in domain!");
    auto It = Result.Map.find(V);
    if (It == Result.Map.end())
      return V;
    return static_cast<Value *>(It->second);
  };

  Result.Structure = MainLoopStructure.map(GetClonedValue);
  Result.Structure.Tag = Tag;

  for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
    BasicBlock *ClonedBB = Result.Blocks[i];
    BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];

    assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");

    for (Instruction &I : *ClonedBB)
      RemapInstruction(&I, Result.Map,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Exit blocks will now have one more predecessor and their PHI nodes need
    // to be edited to reflect that.  No phi nodes need to be introduced
    // because the loop is in LCSSA.

    for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB);
         SBBI != SBBE; ++SBBI) {

      if (OriginalLoop.contains(*SBBI))
        continue; // not an exit block

      for (Instruction &I : **SBBI) {
        if (!isa<PHINode>(&I))
          break;

        PHINode *PN = cast<PHINode>(&I);
        Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
        PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
      }
    }
  }
}

LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
    const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
    BasicBlock *ContinuationBlock) const {

  // We start with a loop with a single latch:
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      |
  //    |                    |
  //    +--------+-----------+
  //             |      ----------------\
  //             |     /                |
  //    +--------v----v------+          |
  //    |                    |          |
  //    |      header        |          |
  //    |                    |          |
  //    +--------------------+          |
  //                                    |
  //            .....                   |
  //                                    |
  //    +--------------------+          |
  //    |                    |          |
  //    |       latch        >----------/
  //    |                    |
  //    +-------v------------+
  //            |
  //            |
  //            |   +--------------------+
  //            |   |                    |
  //            +--->   original exit    |
  //                |                    |
  //                +--------------------+
  //
  // We change the control flow to look like
  //
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      >-------------------------+
  //    |                    |                         |
  //    +--------v-----------+                         |
  //             |    /-------------+                  |
  //             |   /              |                  |
  //    +--------v--v--------+      |                  |
  //    |                    |      |                  |
  //    |      header        |      |   +--------+     |
  //    |                    |      |   |        |     |
  //    +--------------------+      |   |  +-----v-----v-----------+
  //                                |   |  |                       |
  //                                |   |  |     .pseudo.exit      |
  //                                |   |  |                       |
  //                                |   |  +-----------v-----------+
  //                                |   |              |
  //            .....               |   |              |
  //                                |   |     +--------v-------------+
  //    +--------------------+      |   |     |                      |
  //    |                    |      |   |     |   ContinuationBlock  |
  //    |       latch        >------+   |     |                      |
  //    |                    |          |     +----------------------+
  //    +---------v----------+          |
  //              |                     |
  //              |                     |
  //              |     +---------------^-----+
  //              |     |                     |
  //              +----->    .exit.selector   |
  //                    |                     |
  //                    +----------v----------+
  //                               |
  //     +--------------------+    |
  //     |                    |    |
  //     |   original exit    <----+
  //     |                    |
  //     +--------------------+
  //

  RewrittenRangeInfo RRI;

  auto BBInsertLocation = std::next(Function::iterator(LS.Latch));
  RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
                                        &F, &*BBInsertLocation);
  RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
                                      &*BBInsertLocation);

  BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator());
  bool Increasing = LS.IndVarIncreasing;

  IRBuilder<> B(PreheaderJump);

  // EnterLoopCond - is it okay to start executing this `LS'?
  Value *EnterLoopCond = Increasing
                             ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
                             : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);

  B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
  PreheaderJump->eraseFromParent();

  LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
  B.SetInsertPoint(LS.LatchBr);
  Value *TakeBackedgeLoopCond =
      Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
                 : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
  Value *CondForBranch = LS.LatchBrExitIdx == 1
                             ? TakeBackedgeLoopCond
                             : B.CreateNot(TakeBackedgeLoopCond);

  LS.LatchBr->setCondition(CondForBranch);

  B.SetInsertPoint(RRI.ExitSelector);

  // IterationsLeft - are there any more iterations left, given the original
  // upper bound on the induction variable?  If not, we branch to the "real"
  // exit.
  Value *IterationsLeft = Increasing
                              ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
                              : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
  B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);

  BranchInst *BranchToContinuation =
      BranchInst::Create(ContinuationBlock, RRI.PseudoExit);

  // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
  // each of the PHI nodes in the loop header.  This feeds into the initial
  // value of the same PHI nodes if/when we continue execution.
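  // (PHI nodes are always grouped at the top of a basic block, so it is safe
  // to stop at the first non-PHI instruction below.)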
  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);

    PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
                                      BranchToContinuation);

    NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
    NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
                        RRI.ExitSelector);
    RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
  }

  RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
                                  BranchToContinuation);
  RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
  RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);

  // The latch exit now has a branch from `RRI.ExitSelector' instead of
  // `LS.Latch'.  The PHI nodes need to be updated to reflect that.
  for (Instruction &I : *LS.LatchExit) {
    if (PHINode *PN = dyn_cast<PHINode>(&I))
      replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
    else
      break;
  }

  return RRI;
}

void LoopConstrainer::rewriteIncomingValuesForPHIs(
    LoopStructure &LS, BasicBlock *ContinuationBlock,
    const LoopConstrainer::RewrittenRangeInfo &RRI) const {

  unsigned PHIIndex = 0;
  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      if (PN->getIncomingBlock(i) == ContinuationBlock)
        PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
  }

  LS.IndVarStart = RRI.IndVarEnd;
}

BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
                                             BasicBlock *OldPreheader,
                                             const char *Tag) const {

  BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
  BranchInst::Create(LS.Header, Preheader);

  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      replacePHIBlock(PN, OldPreheader, Preheader);
  }

  return Preheader;
}

void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
  Loop *ParentLoop = OriginalLoop.getParentLoop();
  if (!ParentLoop)
    return;

  for (BasicBlock *BB : BBs)
    ParentLoop->addBasicBlockToLoop(BB, OriginalLoopInfo);
}

bool LoopConstrainer::run() {
  BasicBlock *Preheader = nullptr;
  LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
  Preheader = OriginalLoop.getLoopPreheader();
  assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
         "preconditions!");

  OriginalPreheader = Preheader;
  MainLoopPreheader = Preheader;

  Optional<SubRanges> MaybeSR = calculateSubRanges();
  if (!MaybeSR.hasValue()) {
    DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  SubRanges SR = MaybeSR.getValue();
  bool Increasing = MainLoopStructure.IndVarIncreasing;
  IntegerType *IVTy =
      cast<IntegerType>(MainLoopStructure.IndVarNext->getType());

  SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
  Instruction *InsertPt = OriginalPreheader->getTerminator();

  // It would have been better to make `PreLoop' and `PostLoop'
  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
  // constructor.
  ClonedLoop PreLoop, PostLoop;
  bool NeedsPreLoop =
      Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
  bool NeedsPostLoop =
      Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();

  Value *ExitPreLoopAt = nullptr;
  Value *ExitMainLoopAt = nullptr;
  const SCEVConstant *MinusOneS =
      cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));

  if (NeedsPreLoop) {
    const SCEV *ExitPreLoopAtSCEV = nullptr;

    if (Increasing)
      ExitPreLoopAtSCEV = *SR.LowLimit;
    else {
      if (CanBeSMin(SE, *SR.HighLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
                     << "\n");
        return false;
      }
      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
    }

    ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
    ExitPreLoopAt->setName("exit.preloop.at");
  }

  if (NeedsPostLoop) {
    const SCEV *ExitMainLoopAtSCEV = nullptr;

    if (Increasing)
      ExitMainLoopAtSCEV = *SR.HighLimit;
    else {
      if (CanBeSMin(SE, *SR.LowLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
                     << "\n");
        return false;
      }
      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
    }

    ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
    ExitMainLoopAt->setName("exit.mainloop.at");
  }

  // We clone these ahead of time so that we don't have to deal with changing
  // and temporarily invalid IR as we transform the loops.
  if (NeedsPreLoop)
    cloneLoop(PreLoop, "preloop");
  if (NeedsPostLoop)
    cloneLoop(PostLoop, "postloop");

  RewrittenRangeInfo PreLoopRRI;

  if (NeedsPreLoop) {
    Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
                                                  PreLoop.Structure.Header);

    MainLoopPreheader =
        createPreheader(MainLoopStructure, Preheader, "mainloop");
    PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
                                         ExitPreLoopAt, MainLoopPreheader);
    rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
                                 PreLoopRRI);
  }

  BasicBlock *PostLoopPreheader = nullptr;
  RewrittenRangeInfo PostLoopRRI;

  if (NeedsPostLoop) {
    PostLoopPreheader =
        createPreheader(PostLoop.Structure, Preheader, "postloop");
    PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
                                          ExitMainLoopAt, PostLoopPreheader);
    rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
                                 PostLoopRRI);
  }

  BasicBlock *NewMainLoopPreheader =
      MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
  BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
                             PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
                             PostLoopRRI.ExitSelector, NewMainLoopPreheader};

  // Some of the above may be nullptr, filter them out before passing to
  // addToParentLoopIfNeeded.
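  // (std::remove shifts the non-null entries towards the front, preserving
  // their relative order, and returns an iterator to the new logical end.)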
  auto NewBlocksEnd =
      std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);

  addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));
  addToParentLoopIfNeeded(PreLoop.Blocks);
  addToParentLoopIfNeeded(PostLoop.Blocks);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getOffset() and "D" is getScale()).  We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - N * A".
  // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
  // can be generalized as needed.
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // The inequality is satisfied by -M <= IndVar < (L - M) [^1].  All additions
  // and subtractions are twos-complement wrapping and comparisons are signed.
  //
  // Proof:
  //
  //   If there exists IndVar such that -M <= IndVar < (L - M) then it follows
  //   that -M <= (-M + L) [== Eq. 1].  Since L >= 0, if (-M + L) sign-overflows
  //   then (-M + L) < (-M).  Hence by [Eq. 1], (-M + L) could not have
  //   overflown.
  //
  //   This means IndVar = t + (-M) for t in [0, L).  Hence (IndVar + M) = t.
  //   Hence 0 <= (IndVar + M) < L

  // [^1]: Note that the solution does _not_ apply if L < 0; consider values
  // M = 127, IndVar = 126 and L = -2 in an i8 world.

  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;

  const SCEV *C = getOffset();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
  if (D != B)
    return None;

  ConstantInt *ConstD = D->getValue();
  if (!(ConstD->isMinusOne() || ConstD->isOne()))
    return None;

  const SCEV *M = SE.getMinusSCEV(C, A);

  const SCEV *Begin = SE.getNegativeSCEV(M);
  const SCEV *UpperLimit = nullptr;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
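  // For example, for a range check on the induction variable itself (A == C
  // and B == D == 1) we have M == 0, and the safe iteration space computed
  // below comes out to simply [0, L).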
  if (Value *V = getLength()) {
    UpperLimit = SE.getSCEV(V);
  } else {
    assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
    UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  }

  const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectRange(ScalarEvolution &SE,
               const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2) {
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  return InductiveRangeCheck::Range(NewBegin, NewEnd);
}

bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  bool Increasing = LS.IndVarIncreasing;
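  // LS.IndVarNext is the value of the induction variable *after* the
  // increment, so adding the negated step (-1 when increasing, +1 when
  // decreasing) recovers the add recurrence for the value the induction
  // variable has inside the loop body.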
  const SCEV *MinusOne =
      SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
  const SCEVAddRecExpr *IndVar =
      cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LS,
                     SE, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}