//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges.  It does that in a way such that the middle loop
// provably does not need range checks.  As an example, it will convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
                                          cl::Hidden, cl::init(10));

static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
                                             cl::Hidden, cl::init(false));

static const char *ClonedLoopTag = "irce.loop.clone";

#define DEBUG_TYPE "irce"

namespace {

/// An inductive range check is a conditional branch in a loop with
///
///  1. a very cold successor (i.e. the branch jumps to that successor very
///     rarely)
///
///  and
///
///  2. a condition that is provably true for some contiguous range of values
///     taken by the containing loop's induction variable.
///
class InductiveRangeCheck {
  // Classifies a range check
  enum RangeCheckKind : unsigned {
    // Range check of the form "0 <= I".
    RANGE_CHECK_LOWER = 1,

    // Range check of the form "I < L" where L is known positive.
    RANGE_CHECK_UPPER = 2,

    // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
    // conditions.
    RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,

    // Unrecognized range check condition.
    RANGE_CHECK_UNKNOWN = (unsigned)-1
  };

  static StringRef rangeCheckKindToStr(RangeCheckKind);

  const SCEV *Offset = nullptr;
  const SCEV *Scale = nullptr;
  Value *Length = nullptr;
  Use *CheckUse = nullptr;
  RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;

  static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                            ScalarEvolution &SE, Value *&Index,
                                            Value *&Length);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

public:
  const SCEV *getOffset() const { return Offset; }
  const SCEV *getScale() const { return Scale; }
  Value *getLength() const { return Length; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n";
    OS << " Offset: ";
    Offset->print(OS);
    OS << " Scale: ";
    Scale->print(OS);
    OS << " Length: ";
    if (Length)
      Length->print(OS);
    else
      OS << "(null)";
    OS << "\n CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

  LLVM_DUMP_METHOD
  void dump() {
    print(dbgs());
  }

  Use *getCheckUse() const { return CheckUse; }

  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()).  If
  /// R.getEnd() sle R.getBegin(), then R denotes the empty range.

  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
  };

  /// This is the value the condition of the branch needs to evaluate to for the
  /// branch to take the hot successor (see (1) above).
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away.  The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                            const SCEVAddRecExpr *IndVar) const;

  /// Parse out a set of inductive range checks from \p BI and append them to \p
  /// Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that aren't inductive range
  /// checks, and hence don't end up in \p Checks.
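  ///
  /// (Illustrative example, not exhaustive: for a branch on
  /// `0 <= i && i < len && ptr != null`, only the two comparisons on `i` can be
  /// parsed into InductiveRangeChecks; the pointer test is not a range check
  /// and is simply left in place.)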
  static void
  extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE,
                               BranchProbabilityInfo &BPI,
                               SmallVectorImpl<InductiveRangeCheck> &Checks);
};

class InductiveRangeCheckElimination : public LoopPass {
public:
  static char ID;
  InductiveRangeCheckElimination() : LoopPass(ID) {
    initializeInductiveRangeCheckEliminationPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
};

char InductiveRangeCheckElimination::ID = 0;
}

INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
                      "Inductive range check elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce",
                    "Inductive range check elimination", false, false)

StringRef InductiveRangeCheck::rangeCheckKindToStr(
    InductiveRangeCheck::RangeCheckKind RCK) {
  switch (RCK) {
  case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
    return "RANGE_CHECK_UNKNOWN";

  case InductiveRangeCheck::RANGE_CHECK_UPPER:
    return "RANGE_CHECK_UPPER";

  case InductiveRangeCheck::RANGE_CHECK_LOWER:
    return "RANGE_CHECK_LOWER";

  case InductiveRangeCheck::RANGE_CHECK_BOTH:
    return "RANGE_CHECK_BOTH";
  }

  llvm_unreachable("unknown range check type!");
}

/// Parse a single ICmp instruction, `ICI`, into a range check.  If `ICI` cannot
/// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set
/// `Index` and `Length` to `nullptr`.  Otherwise set `Index` to the value being
/// range checked, and set `Length` to the upper limit `Index` is being range
/// checked with if (and only if) the range check type is stronger than or equal
/// to RANGE_CHECK_UPPER.
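///
/// (Illustrative example: `icmp sge i32 %i, 0` parses as RANGE_CHECK_LOWER with
/// `Index` = %i, while `icmp slt i32 %i, %len` with a loop-invariant,
/// known-non-negative %len parses as RANGE_CHECK_UPPER with `Index` = %i and
/// `Length` = %len.  The value names here are made up for the example.)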
///
InductiveRangeCheck::RangeCheckKind
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                         ScalarEvolution &SE, Value *&Index,
                                         Value *&Length) {

  auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
    const SCEV *S = SE.getSCEV(V);
    if (isa<SCEVCouldNotCompute>(S))
      return false;

    return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
           SE.isKnownNonNegative(S);
  };

  using namespace llvm::PatternMatch;

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (Pred) {
  default:
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }

    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_UPPER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_ULT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_BOTH;
    }
    return RANGE_CHECK_UNKNOWN;
  }

  llvm_unreachable("default clause returns!");
}

void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  using namespace llvm::PatternMatch;

  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  if (match(Condition, m_And(m_Value(), m_Value()))) {
    SmallVector<InductiveRangeCheck, 8> SubChecks;
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
                               SubChecks, Visited);
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
                               SubChecks, Visited);

    if (SubChecks.size() == 2) {
      // Handle a special case where we know how to merge two checks separately
      // checking the upper and lower bounds into a full range check.
      const auto &RChkA = SubChecks[0];
      const auto &RChkB = SubChecks[1];
      if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
          RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {

        // If RChkA.Kind == RChkB.Kind then we just found two identical checks.
        // But if one of them is a RANGE_CHECK_LOWER and the other is a
        // RANGE_CHECK_UPPER (the only possibility if they're different) then
        // together they form a RANGE_CHECK_BOTH.
        SubChecks[0].Kind =
            (InductiveRangeCheck::RangeCheckKind)(RChkA.Kind | RChkB.Kind);
        SubChecks[0].Length = RChkA.Length ? RChkA.Length : RChkB.Length;
        SubChecks[0].CheckUse = &ConditionUse;

        // We updated one of the checks in place, now erase the other.
        SubChecks.pop_back();
      }
    }

    Checks.insert(Checks.end(), SubChecks.begin(), SubChecks.end());
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  Value *Length = nullptr, *Index;
  auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length);
  if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
    return;

  const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
  bool IsAffineIndex =
      IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();

  if (!IsAffineIndex)
    return;

  InductiveRangeCheck IRC;
  IRC.Length = Length;
  IRC.Offset = IndexAddRec->getStart();
  IRC.Scale = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  IRC.Kind = RCKind;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
    SmallVectorImpl<InductiveRangeCheck> &Checks) {

  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  BranchProbability LikelyTaken(15, 16);

  if (!SkipProfitabilityChecks &&
      BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken)
    return;

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

// Add metadata to the loop L to disable loop optimizations.  Callers need to
// confirm that optimizing loop L is not beneficial.
static void DisableAllLoopOptsOnLoop(Loop &L) {
  // We do not care about any existing loopID related metadata for L, since we
  // are setting all loop metadata to false.
  LLVMContext &Context = L.getHeader()->getContext();
  // Reserve first location for self reference to the LoopID metadata node.
  MDNode *Dummy = MDNode::get(Context, {});
  MDNode *DisableUnroll = MDNode::get(
      Context, {MDString::get(Context, "llvm.loop.unroll.disable")});
  Metadata *FalseVal =
      ConstantAsMetadata::get(ConstantInt::get(Type::getInt1Ty(Context), 0));
  MDNode *DisableVectorize = MDNode::get(
      Context,
      {MDString::get(Context, "llvm.loop.vectorize.enable"), FalseVal});
  MDNode *DisableLICMVersioning = MDNode::get(
      Context, {MDString::get(Context, "llvm.loop.licm_versioning.disable")});
  MDNode *DisableDistribution = MDNode::get(
      Context,
      {MDString::get(Context, "llvm.loop.distribute.enable"), FalseVal});
  MDNode *NewLoopID =
      MDNode::get(Context, {Dummy, DisableUnroll, DisableVectorize,
                            DisableLICMVersioning, DisableDistribution});
  // Set operand 0 to refer to the loop id itself.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  L.setLoopID(NewLoopID);
}

namespace {

// Keeps track of the structure of a loop.  This is similar to llvm::Loop,
// except that it is more lightweight and can track the state of a loop through
// changing and potentially invalid IR.  This structure also formalizes the
// kinds of loops we can deal with -- ones that have a single latch that is also
// an exiting block *and* have a canonical induction variable.
struct LoopStructure {
  const char *Tag;

  BasicBlock *Header;
  BasicBlock *Latch;

  // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
  // successor is `LatchExit', the exit block of the loop.
  BranchInst *LatchBr;
  BasicBlock *LatchExit;
  unsigned LatchBrExitIdx;

  // The loop represented by this instance of LoopStructure is semantically
  // equivalent to:
  //
  // intN_ty inc = IndVarIncreasing ? 1 : -1;
  // pred_ty predicate = IndVarIncreasing ? ICMP_SLT : ICMP_SGT;
  //
  // for (intN_ty iv = IndVarStart; predicate(iv, LoopExitAt); iv = IndVarNext)
  //   ... body ...

  Value *IndVarNext;
  Value *IndVarStart;
  Value *LoopExitAt;
  bool IndVarIncreasing;

  LoopStructure()
      : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
        LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
        IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}

  template <typename M> LoopStructure map(M Map) const {
    LoopStructure Result;
    Result.Tag = Tag;
    Result.Header = cast<BasicBlock>(Map(Header));
    Result.Latch = cast<BasicBlock>(Map(Latch));
    Result.LatchBr = cast<BranchInst>(Map(LatchBr));
    Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
    Result.LatchBrExitIdx = LatchBrExitIdx;
    Result.IndVarNext = Map(IndVarNext);
    Result.IndVarStart = Map(IndVarStart);
    Result.LoopExitAt = Map(LoopExitAt);
    Result.IndVarIncreasing = IndVarIncreasing;
    return Result;
  }

  static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
                                                    BranchProbabilityInfo &BPI,
                                                    Loop &,
                                                    const char *&);
};

/// This class is used to constrain loops to run within a given iteration space.
/// The algorithm this class implements is given a Loop and a range [Begin,
/// End).  The algorithm then tries to break a "main loop" out of the loop it is
/// given in a way that the "main loop" runs with the induction variable in a
/// subset of [Begin, End).  The algorithm emits appropriate pre and post loops
/// to run any remaining iterations.  The pre loop runs any iterations in which
/// the induction variable is < Begin, and the post loop runs any iterations in
/// which the induction variable is >= End.
///
class LoopConstrainer {
  // The representation of a clone of the original loop we started out with.
  struct ClonedLoop {
    // The cloned blocks
    std::vector<BasicBlock *> Blocks;

    // `Map` maps values in the clonee into values in the cloned version
    ValueToValueMapTy Map;

    // An instance of `LoopStructure` for the cloned loop
    LoopStructure Structure;
  };

  // Result of rewriting the range of a loop.  See changeIterationSpaceEnd for
  // more details on what these fields mean.
  struct RewrittenRangeInfo {
    BasicBlock *PseudoExit;
    BasicBlock *ExitSelector;
    std::vector<PHINode *> PHIValuesAtPseudoExit;
    PHINode *IndVarEnd;

    RewrittenRangeInfo()
        : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
  };

  // Calculated subranges we restrict the iteration space of the main loop to.
  // See the implementation of `calculateSubRanges' for more details on how
  // these fields are computed.  `LowLimit` is None if there is no restriction
  // on the low end of the restricted iteration space of the main loop.
  // `HighLimit` is None if there is no restriction on the high end of the
  // restricted iteration space of the main loop.
  struct SubRanges {
    Optional<const SCEV *> LowLimit;
    Optional<const SCEV *> HighLimit;
  };

  // A utility function that does a `replaceUsesOfWith' on the incoming block
  // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
  // incoming block list with `ReplaceBy'.
  static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
                              BasicBlock *ReplaceBy);

  // Compute a safe set of limits for the main loop to run in -- effectively the
  // intersection of `Range' and the iteration space of the original loop.
  // Return None if unable to compute the set of subranges.
  //
  Optional<SubRanges> calculateSubRanges() const;

  // Clone `OriginalLoop' and return the result in CLResult.  The IR after
  // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
  // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
  // but there is no such edge.
  //
  void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;

  // Create the appropriate loop structure needed to describe a cloned copy of
  // `Original`.  The clone is described by `VM`.
  Loop *createClonedLoopStructure(Loop *Original, Loop *Parent,
                                  ValueToValueMapTy &VM);

  // Rewrite the iteration space of the loop denoted by (LS, Preheader).  The
  // iteration space of the rewritten loop ends at ExitLoopAt.  The start of the
  // iteration space is not changed.  `ExitLoopAt' is assumed to be slt
  // `OriginalHeaderCount'.
  //
  // If there are iterations left to execute, control is made to jump to
  // `ContinuationBlock', otherwise they take the normal loop exit.  The
  // returned `RewrittenRangeInfo' object is populated as follows:
  //
  //  .PseudoExit is a basic block that unconditionally branches to
  //  `ContinuationBlock'.
  //
  //  .ExitSelector is a basic block that decides, on exit from the loop,
  //  whether to branch to the "true" exit or to `PseudoExit'.
  //
  //  .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value
  //  for each PHINode in the loop header on taking the pseudo exit.
  //
  // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
  // preheader because it is made to branch to the loop header only
  // conditionally.
  //
  RewrittenRangeInfo
  changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
                          Value *ExitLoopAt,
                          BasicBlock *ContinuationBlock) const;

  // The loop denoted by `LS' has `OldPreheader' as its preheader.  This
  // function creates a new preheader for `LS' and returns it.
  //
  BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
                              const char *Tag) const;

  // `ContinuationBlockAndPreheader' was the continuation block for some call to
  // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'.
  // This function rewrites the PHI nodes in `LS.Header' to start with the
  // correct value.
  void rewriteIncomingValuesForPHIs(
      LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
      const LoopConstrainer::RewrittenRangeInfo &RRI) const;

  // Even though we do not preserve any passes at this time, we at least need to
  // keep the parent loop structure consistent.  The `LPPassManager' seems to
  // verify this after running a loop pass.  This function adds the list of
  // blocks denoted by BBs to this loop's parent loop if required.
  void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);

  // Some global state.
  Function &F;
  LLVMContext &Ctx;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LPPassManager &LPM;
  LoopInfo &LI;

  // Information about the original loop we started out with.
  Loop &OriginalLoop;
  const SCEV *LatchTakenCount;
  BasicBlock *OriginalPreheader;

  // The preheader of the main loop.  This may or may not be different from
  // `OriginalPreheader'.
  BasicBlock *MainLoopPreheader;

  // The range we need to run the main loop in.
  InductiveRangeCheck::Range Range;

  // The structure of the main loop (see comment at the beginning of this class
  // for a definition).
  LoopStructure MainLoopStructure;

public:
  LoopConstrainer(Loop &L, LoopInfo &LI, LPPassManager &LPM,
                  const LoopStructure &LS, ScalarEvolution &SE,
                  DominatorTree &DT, InductiveRangeCheck::Range R)
      : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
        SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L),
        LatchTakenCount(nullptr), OriginalPreheader(nullptr),
        MainLoopPreheader(nullptr), Range(R), MainLoopStructure(LS) {}

  // Entry point for the algorithm.  Returns true on success.
  bool run();
};

}

void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
                                      BasicBlock *ReplaceBy) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingBlock(i) == Block)
      PN->setIncomingBlock(i, ReplaceBy);
}

static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
  APInt SMax =
      APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMax) &&
         SE.getUnsignedRange(S).contains(SMax);
}

static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
  APInt SMin =
      APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMin) &&
         SE.getUnsignedRange(S).contains(SMin);
}

Optional<LoopStructure>
LoopStructure::parseLoopStructure(ScalarEvolution &SE,
                                  BranchProbabilityInfo &BPI, Loop &L,
                                  const char *&FailureReason) {
  if (!L.isLoopSimplifyForm()) {
    FailureReason = "loop not in LoopSimplify form";
    return None;
  }

  BasicBlock *Latch = L.getLoopLatch();
  assert(Latch && "Simplified loops only have one latch!");

  if (Latch->getTerminator()->getMetadata(ClonedLoopTag)) {
    FailureReason = "loop has already been cloned";
    return None;
  }

  if (!L.isLoopExiting(Latch)) {
    FailureReason = "no loop latch";
    return None;
  }

  BasicBlock *Header = L.getHeader();
  BasicBlock *Preheader = L.getLoopPreheader();
  if (!Preheader) {
    FailureReason = "no preheader";
    return None;
  }

  BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    FailureReason = "latch terminator not conditional branch";
    return None;
  }

  unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;

  BranchProbability ExitProbability =
      BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);

  if (!SkipProfitabilityChecks &&
      ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
    FailureReason = "short running loop, not profitable";
    return None;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
  if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
    FailureReason = "latch terminator branch not conditional on integral icmp";
    return None;
  }

  const SCEV *LatchCount = SE.getExitCount(&L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchCount)) {
    FailureReason = "could not compute latch count";
    return None;
  }

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LeftValue = ICI->getOperand(0);
  const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
  IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());

  Value *RightValue = ICI->getOperand(1);
  const SCEV *RightSCEV = SE.getSCEV(RightValue);

  // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
  if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
    if (isa<SCEVAddRecExpr>(RightSCEV)) {
      std::swap(LeftSCEV, RightSCEV);
      std::swap(LeftValue, RightValue);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    } else {
      FailureReason = "no add recurrences in the icmp";
      return None;
    }
  }

  auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
    if (AR->getNoWrapFlags(SCEV::FlagNSW))
      return true;

    IntegerType *Ty = cast<IntegerType>(AR->getType());
    IntegerType *WideTy =
        IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);

    const SCEVAddRecExpr *ExtendAfterOp =
        dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
    if (ExtendAfterOp) {
      const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
      const SCEV *ExtendedStep =
          SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);

      bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
                          ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;

      if (NoSignedWrap)
        return true;
    }

    // We may have proved this when computing the sign extension above.
    return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
  };

  auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
    if (!AR->isAffine())
      return false;

    // Currently we only work with induction variables that have been proved to
    // not wrap.  This restriction can potentially be lifted in the future.

    if (!HasNoSignedWrap(AR))
      return false;

    if (const SCEVConstant *StepExpr =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
      ConstantInt *StepCI = StepExpr->getValue();
      if (StepCI->isOne() || StepCI->isMinusOne()) {
        IsIncreasing = StepCI->isOne();
        return true;
      }
    }

    return false;
  };

  // `ICI` is interpreted as taking the backedge if the *next* value of the
  // induction variable satisfies some constraint.
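  // (Illustrative note: for a typical `for (i = 0; i < n; ++i)` loop the latch
  // branches on `i + 1 < n`, so `LeftSCEV` is the post-increment recurrence
  // {1,+,1} and `IndVarStart` below is recovered by subtracting one step.)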

  const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
  bool IsIncreasing = false;
  if (!IsInductionVar(IndVarNext, IsIncreasing)) {
    FailureReason = "LHS in icmp not induction variable";
    return None;
  }

  const SCEV *StartNext = IndVarNext->getStart();
  const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
  const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);

  ConstantInt *One = ConstantInt::get(IndVarTy, 1);
  // TODO: generalize the predicates here to also match their unsigned variants.
  if (IsIncreasing) {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp slt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMax(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an slt and not an sle.
        FailureReason = "limit may overflow when coercing sle to slt";
        return None;
      }

      if (!SE.isLoopEntryGuardedByCond(
              &L, CmpInst::ICMP_SLT, IndVarStart,
              SE.getAddExpr(RightSCEV, SE.getOne(RightSCEV->getType())))) {
        FailureReason = "Induction variable start not bounded by upper limit";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateAdd(RightValue, One);
    } else {
      if (!SE.isLoopEntryGuardedByCond(&L, CmpInst::ICMP_SLT, IndVarStart,
                                       RightSCEV)) {
        FailureReason = "Induction variable start not bounded by upper limit";
        return None;
      }
    }
  } else {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp sgt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMin(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an sgt and not an sge.
        FailureReason = "limit may overflow when coercing sge to sgt";
        return None;
      }

      if (!SE.isLoopEntryGuardedByCond(
              &L, CmpInst::ICMP_SGT, IndVarStart,
              SE.getMinusSCEV(RightSCEV, SE.getOne(RightSCEV->getType())))) {
        FailureReason = "Induction variable start not bounded by lower limit";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateSub(RightValue, One);
    } else {
      if (!SE.isLoopEntryGuardedByCond(&L, CmpInst::ICMP_SGT, IndVarStart,
                                       RightSCEV)) {
        FailureReason = "Induction variable start not bounded by lower limit";
        return None;
      }
    }
  }

  BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);

  assert(SE.getLoopDisposition(LatchCount, &L) ==
             ScalarEvolution::LoopInvariant &&
         "loop variant exit count doesn't make sense!");

  assert(!L.contains(LatchExit) && "expected an exit block!");
  const DataLayout &DL = Preheader->getModule()->getDataLayout();
  Value *IndVarStartV =
      SCEVExpander(SE, DL, "irce")
          .expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator());
  IndVarStartV->setName("indvar.start");

  LoopStructure Result;

  Result.Tag = "main";
  Result.Header = Header;
  Result.Latch = Latch;
  Result.LatchBr = LatchBr;
  Result.LatchExit = LatchExit;
  Result.LatchBrExitIdx = LatchBrExitIdx;
  Result.IndVarStart = IndVarStartV;
  Result.IndVarNext = LeftValue;
  Result.IndVarIncreasing = IsIncreasing;
  Result.LoopExitAt = RightValue;

  FailureReason = nullptr;

  return Result;
}

Optional<LoopConstrainer::SubRanges>
LoopConstrainer::calculateSubRanges() const {
  IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());

  if (Range.getType() != Ty)
    return None;

  LoopConstrainer::SubRanges Result;

  // I think we can be more aggressive here and make this nuw / nsw if the
  // addition that feeds into the icmp for the latch's terminating branch is nuw
  // / nsw.  In any case, a wrapping 2's complement addition is safe.
  ConstantInt *One = ConstantInt::get(Ty, 1);
  const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
  const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
  // range of values the induction variable takes.

  const SCEV *Smallest = nullptr, *Greatest = nullptr;

  if (Increasing) {
    Smallest = Start;
    Greatest = End;
  } else {
    // These two computations may sign-overflow.  Here is why that is okay:
    //
    // We know that the induction variable does not sign-overflow on any
    // iteration except the last one, and it starts at `Start` and ends at
    // `End`, decrementing by one every time.
    //
    //  * if `Smallest` sign-overflows we know `End` is `INT_SMAX`.  Since the
    //    induction variable is decreasing, we know that the smallest value
    //    the loop body is actually executed with is `INT_SMIN` == `Smallest`.
    //
    //  * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`.  In
    //    that case, `Clamp` will always return `Smallest` and
    //    [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
    //    will be an empty range.  Returning an empty range is always safe.
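    //
    // (Illustrative example: for a decreasing i8 loop with Start = 5 and
    // End = -3, the body runs with the values 5, 4, ..., -2, i.e. exactly the
    // half-open interval [End + 1, Start + 1) = [Smallest, Greatest).)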
    //

    Smallest = SE.getAddExpr(End, SE.getSCEV(One));
    Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
  }

  auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
    return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop.

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
                                const char *Tag) const {
  for (BasicBlock *BB : OriginalLoop.getBlocks()) {
    BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
    Result.Blocks.push_back(Clone);
    Result.Map[BB] = Clone;
  }

  auto GetClonedValue = [&Result](Value *V) {
    assert(V && "null values not in domain!");
    auto It = Result.Map.find(V);
    if (It == Result.Map.end())
      return V;
    return static_cast<Value *>(It->second);
  };

  auto *ClonedLatch =
      cast<BasicBlock>(GetClonedValue(OriginalLoop.getLoopLatch()));
  ClonedLatch->getTerminator()->setMetadata(ClonedLoopTag,
                                            MDNode::get(Ctx, {}));

  Result.Structure = MainLoopStructure.map(GetClonedValue);
  Result.Structure.Tag = Tag;

  for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
    BasicBlock *ClonedBB = Result.Blocks[i];
    BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];

    assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");

    for (Instruction &I : *ClonedBB)
      RemapInstruction(&I, Result.Map,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Exit blocks will now have one more predecessor and their PHI nodes need
    // to be edited to reflect that.  No phi nodes need to be introduced because
    // the loop is in LCSSA.

    for (auto *SBB : successors(OriginalBB)) {
      if (OriginalLoop.contains(SBB))
        continue; // not an exit block

      for (Instruction &I : *SBB) {
        auto *PN = dyn_cast<PHINode>(&I);
        if (!PN)
          break;

        Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
        PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
      }
    }
  }
}

LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
    const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
    BasicBlock *ContinuationBlock) const {

  // We start with a loop with a single latch:
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      |
  //    |                    |
  //    +--------+-----------+
  //             |      ----------------\
  //             |     /                |
  //    +--------v----v------+          |
  //    |                    |          |
  //    |      header        |          |
  //    |                    |          |
  //    +--------------------+          |
  //                                    |
  //            .....                   |
  //                                    |
  //    +--------------------+          |
  //    |                    |          |
  //    |       latch        >----------/
  //    |                    |
  //    +-------v------------+
  //            |
  //            |
  //            |   +--------------------+
  //            |   |                    |
  //            +--->   original exit    |
  //                |                    |
  //                +--------------------+
  //
  // We change the control flow to look like
  //
  //
  //    +--------------------+
  //    |                    |
  //    |     preheader      >-------------------------+
  //    |                    |                         |
  //    +--------v-----------+                         |
  //             |    /-------------+                  |
  //             |   /              |                  |
  //    +--------v--v--------+      |                  |
  //    |                    |      |                  |
  //    |      header        |      |   +--------+     |
  //    |                    |      |   |        |     |
  //    +--------------------+      |   |  +-----v-----v-----------+
  //                                |   |  |                       |
  //                                |   |  |      .pseudo.exit     |
  //                                |   |  |                       |
  //                                |   |  +-----------v-----------+
  //                                |   |              |
  //            .....               |   |              |
  //                                |   |    +---------v------------+
  //    +--------------------+      |   |    |                      |
  //    |                    |      |   |    |  ContinuationBlock   |
  //    |       latch        >------+   |    |                      |
  //    |                    |          |    +----------------------+
  //    +---------v----------+          |
  //              |                     |
  //              |                     |
  //              |     +---------------^-----+
  //              |     |                     |
  //              +----->    .exit.selector   |
  //                    |                     |
  //                    +----------v----------+
  //                               |
  //     +--------------------+    |
  //     |                    |    |
  //     |   original exit    <----+
  //     |                    |
  //     +--------------------+
  //

  RewrittenRangeInfo RRI;

  BasicBlock *BBInsertLocation = LS.Latch->getNextNode();
  RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
                                        &F, BBInsertLocation);
  RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
                                      BBInsertLocation);

  BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator());
  bool Increasing = LS.IndVarIncreasing;

  IRBuilder<> B(PreheaderJump);

  // EnterLoopCond - is it okay to start executing this `LS'?
  Value *EnterLoopCond = Increasing
                             ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
                             : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);

  B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
  PreheaderJump->eraseFromParent();

  LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
  B.SetInsertPoint(LS.LatchBr);
  Value *TakeBackedgeLoopCond =
      Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
                 : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
  Value *CondForBranch = LS.LatchBrExitIdx == 1
                             ? TakeBackedgeLoopCond
                             : B.CreateNot(TakeBackedgeLoopCond);

  LS.LatchBr->setCondition(CondForBranch);

  B.SetInsertPoint(RRI.ExitSelector);

  // IterationsLeft - are there any more iterations left, given the original
  // upper bound on the induction variable?  If not, we branch to the "real"
  // exit.
  Value *IterationsLeft = Increasing
                              ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
                              : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
  B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);

  BranchInst *BranchToContinuation =
      BranchInst::Create(ContinuationBlock, RRI.PseudoExit);

  // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
  // each of the PHI nodes in the loop header.  This feeds into the initial
  // value of the same PHI nodes if/when we continue execution.
  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
                                      BranchToContinuation);

    NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
    NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
                        RRI.ExitSelector);
    RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
  }

  RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
                                  BranchToContinuation);
  RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
  RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);

  // The latch exit now has a branch from `RRI.ExitSelector' instead of
  // `LS.Latch'.  The PHI nodes need to be updated to reflect that.
  for (Instruction &I : *LS.LatchExit) {
    if (PHINode *PN = dyn_cast<PHINode>(&I))
      replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
    else
      break;
  }

  return RRI;
}

void LoopConstrainer::rewriteIncomingValuesForPHIs(
    LoopStructure &LS, BasicBlock *ContinuationBlock,
    const LoopConstrainer::RewrittenRangeInfo &RRI) const {

  unsigned PHIIndex = 0;
  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      if (PN->getIncomingBlock(i) == ContinuationBlock)
        PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
  }

  LS.IndVarStart = RRI.IndVarEnd;
}

BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
                                             BasicBlock *OldPreheader,
                                             const char *Tag) const {

  BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
  BranchInst::Create(LS.Header, Preheader);

  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      replacePHIBlock(PN, OldPreheader, Preheader);
  }

  return Preheader;
}

void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
  Loop *ParentLoop = OriginalLoop.getParentLoop();
  if (!ParentLoop)
    return;

  for (BasicBlock *BB : BBs)
    ParentLoop->addBasicBlockToLoop(BB, LI);
}

Loop *LoopConstrainer::createClonedLoopStructure(Loop *Original, Loop *Parent,
                                                 ValueToValueMapTy &VM) {
  Loop &New = *new Loop();
  if (Parent)
    Parent->addChildLoop(&New);
  else
    LI.addTopLevelLoop(&New);
  LPM.addLoop(New);

  // Add all of the blocks in Original to the new loop.
  for (auto *BB : Original->blocks())
    if (LI.getLoopFor(BB) == Original)
      New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), LI);

  // Add all of the subloops to the new loop.
  for (Loop *SubLoop : *Original)
    createClonedLoopStructure(SubLoop, &New, VM);

  return &New;
}

bool LoopConstrainer::run() {
  BasicBlock *Preheader = nullptr;
  LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
  Preheader = OriginalLoop.getLoopPreheader();
  assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
         "preconditions!");

  OriginalPreheader = Preheader;
  MainLoopPreheader = Preheader;

  Optional<SubRanges> MaybeSR = calculateSubRanges();
  if (!MaybeSR.hasValue()) {
    DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  SubRanges SR = MaybeSR.getValue();
  bool Increasing = MainLoopStructure.IndVarIncreasing;
  IntegerType *IVTy =
      cast<IntegerType>(MainLoopStructure.IndVarNext->getType());

  SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
  Instruction *InsertPt = OriginalPreheader->getTerminator();

  // It would have been better to make `PreLoop' and `PostLoop'
  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
  // constructor.
  ClonedLoop PreLoop, PostLoop;
  bool NeedsPreLoop =
      Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
  bool NeedsPostLoop =
      Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();

  Value *ExitPreLoopAt = nullptr;
  Value *ExitMainLoopAt = nullptr;
  const SCEVConstant *MinusOneS =
      cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));

  if (NeedsPreLoop) {
    const SCEV *ExitPreLoopAtSCEV = nullptr;

    if (Increasing)
      ExitPreLoopAtSCEV = *SR.LowLimit;
    else {
      if (CanBeSMin(SE, *SR.HighLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "preloop exit limit. HighLimit = " << *(*SR.HighLimit)
                     << "\n");
        return false;
      }
      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
    }

    ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
    ExitPreLoopAt->setName("exit.preloop.at");
  }

  if (NeedsPostLoop) {
    const SCEV *ExitMainLoopAtSCEV = nullptr;

    if (Increasing)
      ExitMainLoopAtSCEV = *SR.HighLimit;
    else {
      if (CanBeSMin(SE, *SR.LowLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "mainloop exit limit. LowLimit = " << *(*SR.LowLimit)
                     << "\n");
        return false;
      }
      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
    }

    ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
    ExitMainLoopAt->setName("exit.mainloop.at");
  }

  // We clone these ahead of time so that we don't have to deal with changing
  // and temporarily invalid IR as we transform the loops.
  if (NeedsPreLoop)
    cloneLoop(PreLoop, "preloop");
  if (NeedsPostLoop)
    cloneLoop(PostLoop, "postloop");

  RewrittenRangeInfo PreLoopRRI;

  if (NeedsPreLoop) {
    Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
                                                  PreLoop.Structure.Header);

    MainLoopPreheader =
        createPreheader(MainLoopStructure, Preheader, "mainloop");
    PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
                                         ExitPreLoopAt, MainLoopPreheader);
    rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
                                 PreLoopRRI);
  }

  BasicBlock *PostLoopPreheader = nullptr;
  RewrittenRangeInfo PostLoopRRI;

  if (NeedsPostLoop) {
    PostLoopPreheader =
        createPreheader(PostLoop.Structure, Preheader, "postloop");
    PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
                                          ExitMainLoopAt, PostLoopPreheader);
    rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
                                 PostLoopRRI);
  }

  BasicBlock *NewMainLoopPreheader =
      MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
  BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
                             PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
                             PostLoopRRI.ExitSelector, NewMainLoopPreheader};

  // Some of the above may be nullptr, filter them out before passing to
  // addToParentLoopIfNeeded.
  auto NewBlocksEnd =
      std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);

  addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));

  DT.recalculate(F);

  // We need to first add all the pre and post loop blocks into the loop
  // structures (as part of createClonedLoopStructure), and then update the
  // LCSSA form and LoopSimplifyForm.  This is necessary for correctly updating
  // LI when LoopSimplifyForm is generated.
  Loop *PreL = nullptr, *PostL = nullptr;
  if (!PreLoop.Blocks.empty()) {
    PreL = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PreLoop.Map);
  }

  if (!PostLoop.Blocks.empty()) {
    PostL = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PostLoop.Map);
  }

  // This function canonicalizes the loop into Loop-Simplify and LCSSA forms.
  auto CanonicalizeLoop = [&](Loop *L, bool IsOriginalLoop) {
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
    // Pre/post loops are slow paths, we do not need to perform any loop
    // optimizations on them.
    if (!IsOriginalLoop)
      DisableAllLoopOptsOnLoop(*L);
  };
  if (PreL)
    CanonicalizeLoop(PreL, false);
  if (PostL)
    CanonicalizeLoop(PostL, false);
  CanonicalizeLoop(&OriginalLoop, true);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the value "C + D * I"
  // ("C" is getOffset() and "D" is getScale()).
  // We rewrite the value being range checked to "M + N * IndVar" where
  // "N" = "D * B^(-1)" and "M" = "C - N * A".  Currently we support this only
  // for "B" = "D" = { 1 or -1 }, but the code can be generalized as needed.
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // The inequality is satisfied by -M <= IndVar < (L - M) [^1].  All additions
  // and subtractions are twos-complement wrapping and comparisons are signed.
  //
  // Proof:
  //
  //   If there exists IndVar such that -M <= IndVar < (L - M) then it follows
  //   that -M <= (-M + L) [== Eq. 1].  Since L >= 0, if (-M + L) sign-overflows
  //   then (-M + L) < (-M).  Hence by [Eq. 1], (-M + L) could not have
  //   overflowed.
  //
  //   This means IndVar = t + (-M) for t in [0, L).  Hence (IndVar + M) = t.
  //   Hence 0 <= (IndVar + M) < L
  //
  // [^1]: Note that the solution does _not_ apply if L < 0; consider values
  // M = 127, IndVar = 126 and L = -2 in an i8 world.

  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;

  const SCEV *C = getOffset();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
  if (D != B)
    return None;

  ConstantInt *ConstD = D->getValue();
  if (!(ConstD->isMinusOne() || ConstD->isOne()))
    return None;

  const SCEV *M = SE.getMinusSCEV(C, A);

  const SCEV *Begin = SE.getNegativeSCEV(M);
  const SCEV *UpperLimit = nullptr;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
  if (Value *V = getLength()) {
    UpperLimit = SE.getSCEV(V);
  } else {
    assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
    UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  }

  const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectRange(ScalarEvolution &SE,
               const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2) {
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  return InductiveRangeCheck::Range(NewBegin, NewEnd);
}

bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  bool Increasing = LS.IndVarIncreasing;
  const SCEV *MinusOne =
      SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
  const SCEVAddRecExpr *IndVar =
      cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LPM,
                     LS, SE, DT, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}