1 //===- InductiveRangeCheckElimination.cpp - -------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // The InductiveRangeCheckElimination pass splits a loop's iteration space into 11 // three disjoint ranges. It does that in a way such that the loop running in 12 // the middle loop provably does not need range checks. As an example, it will 13 // convert 14 // 15 // len = < known positive > 16 // for (i = 0; i < n; i++) { 17 // if (0 <= i && i < len) { 18 // do_something(); 19 // } else { 20 // throw_out_of_bounds(); 21 // } 22 // } 23 // 24 // to 25 // 26 // len = < known positive > 27 // limit = smin(n, len) 28 // // no first segment 29 // for (i = 0; i < limit; i++) { 30 // if (0 <= i && i < len) { // this check is fully redundant 31 // do_something(); 32 // } else { 33 // throw_out_of_bounds(); 34 // } 35 // } 36 // for (i = limit; i < n; i++) { 37 // if (0 <= i && i < len) { 38 // do_something(); 39 // } else { 40 // throw_out_of_bounds(); 41 // } 42 // } 43 // 44 //===----------------------------------------------------------------------===// 45 46 #include "llvm/ADT/APInt.h" 47 #include "llvm/ADT/ArrayRef.h" 48 #include "llvm/ADT/None.h" 49 #include "llvm/ADT/Optional.h" 50 #include "llvm/ADT/SmallPtrSet.h" 51 #include "llvm/ADT/SmallVector.h" 52 #include "llvm/ADT/StringRef.h" 53 #include "llvm/ADT/Twine.h" 54 #include "llvm/Analysis/BranchProbabilityInfo.h" 55 #include "llvm/Analysis/LoopInfo.h" 56 #include "llvm/Analysis/LoopPass.h" 57 #include "llvm/Analysis/ScalarEvolution.h" 58 #include "llvm/Analysis/ScalarEvolutionExpander.h" 59 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 60 #include "llvm/IR/BasicBlock.h" 61 #include "llvm/IR/CFG.h" 62 #include "llvm/IR/Constants.h" 63 #include "llvm/IR/DerivedTypes.h" 64 #include "llvm/IR/Dominators.h" 65 #include "llvm/IR/Function.h" 66 #include "llvm/IR/IRBuilder.h" 67 #include "llvm/IR/InstrTypes.h" 68 #include "llvm/IR/Instructions.h" 69 #include "llvm/IR/Metadata.h" 70 #include "llvm/IR/Module.h" 71 #include "llvm/IR/PatternMatch.h" 72 #include "llvm/IR/Type.h" 73 #include "llvm/IR/Use.h" 74 #include "llvm/IR/User.h" 75 #include "llvm/IR/Value.h" 76 #include "llvm/Pass.h" 77 #include "llvm/Support/BranchProbability.h" 78 #include "llvm/Support/Casting.h" 79 #include "llvm/Support/CommandLine.h" 80 #include "llvm/Support/Compiler.h" 81 #include "llvm/Support/Debug.h" 82 #include "llvm/Support/ErrorHandling.h" 83 #include "llvm/Support/raw_ostream.h" 84 #include "llvm/Transforms/Scalar.h" 85 #include "llvm/Transforms/Utils/Cloning.h" 86 #include "llvm/Transforms/Utils/LoopSimplify.h" 87 #include "llvm/Transforms/Utils/LoopUtils.h" 88 #include "llvm/Transforms/Utils/ValueMapper.h" 89 #include <algorithm> 90 #include <cassert> 91 #include <iterator> 92 #include <limits> 93 #include <utility> 94 #include <vector> 95 96 using namespace llvm; 97 using namespace llvm::PatternMatch; 98 99 static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden, 100 cl::init(64)); 101 102 static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden, 103 cl::init(false)); 104 105 static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden, 106 cl::init(false)); 107 108 static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal", 109 cl::Hidden, 
cl::init(10)); 110 111 static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks", 112 cl::Hidden, cl::init(false)); 113 114 static cl::opt<bool> AllowUnsignedLatchCondition("irce-allow-unsigned-latch", 115 cl::Hidden, cl::init(true)); 116 117 static const char *ClonedLoopTag = "irce.loop.clone"; 118 119 #define DEBUG_TYPE "irce" 120 121 namespace { 122 123 /// An inductive range check is conditional branch in a loop with 124 /// 125 /// 1. a very cold successor (i.e. the branch jumps to that successor very 126 /// rarely) 127 /// 128 /// and 129 /// 130 /// 2. a condition that is provably true for some contiguous range of values 131 /// taken by the containing loop's induction variable. 132 /// 133 class InductiveRangeCheck { 134 // Classifies a range check 135 enum RangeCheckKind : unsigned { 136 // Range check of the form "0 <= I". 137 RANGE_CHECK_LOWER = 1, 138 139 // Range check of the form "I < L" where L is known positive. 140 RANGE_CHECK_UPPER = 2, 141 142 // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER 143 // conditions. 144 RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER, 145 146 // Unrecognized range check condition. 147 RANGE_CHECK_UNKNOWN = (unsigned)-1 148 }; 149 150 static StringRef rangeCheckKindToStr(RangeCheckKind); 151 152 const SCEV *Begin = nullptr; 153 const SCEV *Step = nullptr; 154 const SCEV *End = nullptr; 155 Use *CheckUse = nullptr; 156 RangeCheckKind Kind = RANGE_CHECK_UNKNOWN; 157 bool IsSigned = true; 158 159 static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI, 160 ScalarEvolution &SE, Value *&Index, 161 Value *&Length, bool &IsSigned); 162 163 static void 164 extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse, 165 SmallVectorImpl<InductiveRangeCheck> &Checks, 166 SmallPtrSetImpl<Value *> &Visited); 167 168 public: 169 const SCEV *getBegin() const { return Begin; } 170 const SCEV *getStep() const { return Step; } 171 const SCEV *getEnd() const { return End; } 172 bool isSigned() const { return IsSigned; } 173 174 void print(raw_ostream &OS) const { 175 OS << "InductiveRangeCheck:\n"; 176 OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n"; 177 OS << " Begin: "; 178 Begin->print(OS); 179 OS << " Step: "; 180 Step->print(OS); 181 OS << " End: "; 182 End->print(OS); 183 OS << "\n CheckUse: "; 184 getCheckUse()->getUser()->print(OS); 185 OS << " Operand: " << getCheckUse()->getOperandNo() << "\n"; 186 } 187 188 LLVM_DUMP_METHOD 189 void dump() { 190 print(dbgs()); 191 } 192 193 Use *getCheckUse() const { return CheckUse; } 194 195 /// Represents an signed integer range [Range.getBegin(), Range.getEnd()). If 196 /// R.getEnd() le R.getBegin(), then R denotes the empty range. 197 198 class Range { 199 const SCEV *Begin; 200 const SCEV *End; 201 202 public: 203 Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) { 204 assert(Begin->getType() == End->getType() && "ill-typed range!"); 205 } 206 207 Type *getType() const { return Begin->getType(); } 208 const SCEV *getBegin() const { return Begin; } 209 const SCEV *getEnd() const { return End; } 210 bool isEmpty(ScalarEvolution &SE, bool IsSigned) const { 211 if (Begin == End) 212 return true; 213 if (IsSigned) 214 return SE.isKnownPredicate(ICmpInst::ICMP_SGE, Begin, End); 215 else 216 return SE.isKnownPredicate(ICmpInst::ICMP_UGE, Begin, End); 217 } 218 }; 219 220 /// This is the value the condition of the branch needs to evaluate to for the 221 /// branch to take the hot successor (see (1) above). 
222 bool getPassingDirection() { return true; } 223 224 /// Computes a range for the induction variable (IndVar) in which the range 225 /// check is redundant and can be constant-folded away. The induction 226 /// variable is not required to be the canonical {0,+,1} induction variable. 227 Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE, 228 const SCEVAddRecExpr *IndVar, 229 bool IsLatchSigned) const; 230 231 /// Parse out a set of inductive range checks from \p BI and append them to \p 232 /// Checks. 233 /// 234 /// NB! There may be conditions feeding into \p BI that aren't inductive range 235 /// checks, and hence don't end up in \p Checks. 236 static void 237 extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE, 238 BranchProbabilityInfo &BPI, 239 SmallVectorImpl<InductiveRangeCheck> &Checks); 240 }; 241 242 class InductiveRangeCheckElimination : public LoopPass { 243 public: 244 static char ID; 245 246 InductiveRangeCheckElimination() : LoopPass(ID) { 247 initializeInductiveRangeCheckEliminationPass( 248 *PassRegistry::getPassRegistry()); 249 } 250 251 void getAnalysisUsage(AnalysisUsage &AU) const override { 252 AU.addRequired<BranchProbabilityInfoWrapperPass>(); 253 getLoopAnalysisUsage(AU); 254 } 255 256 bool runOnLoop(Loop *L, LPPassManager &LPM) override; 257 }; 258 259 } // end anonymous namespace 260 261 char InductiveRangeCheckElimination::ID = 0; 262 263 INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce", 264 "Inductive range check elimination", false, false) 265 INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass) 266 INITIALIZE_PASS_DEPENDENCY(LoopPass) 267 INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce", 268 "Inductive range check elimination", false, false) 269 270 StringRef InductiveRangeCheck::rangeCheckKindToStr( 271 InductiveRangeCheck::RangeCheckKind RCK) { 272 switch (RCK) { 273 case InductiveRangeCheck::RANGE_CHECK_UNKNOWN: 274 return "RANGE_CHECK_UNKNOWN"; 275 276 case InductiveRangeCheck::RANGE_CHECK_UPPER: 277 return "RANGE_CHECK_UPPER"; 278 279 case InductiveRangeCheck::RANGE_CHECK_LOWER: 280 return "RANGE_CHECK_LOWER"; 281 282 case InductiveRangeCheck::RANGE_CHECK_BOTH: 283 return "RANGE_CHECK_BOTH"; 284 } 285 286 llvm_unreachable("unknown range check type!"); 287 } 288 289 /// Parse a single ICmp instruction, `ICI`, into a range check. If `ICI` cannot 290 /// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set 291 /// `Index` and `Length` to `nullptr`. Otherwise set `Index` to the value being 292 /// range checked, and set `Length` to the upper limit `Index` is being range 293 /// checked with if (and only if) the range check type is stronger or equal to 294 /// RANGE_CHECK_UPPER. 
295 InductiveRangeCheck::RangeCheckKind 296 InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI, 297 ScalarEvolution &SE, Value *&Index, 298 Value *&Length, bool &IsSigned) { 299 auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) { 300 const SCEV *S = SE.getSCEV(V); 301 if (isa<SCEVCouldNotCompute>(S)) 302 return false; 303 304 return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant && 305 SE.isKnownNonNegative(S); 306 }; 307 308 ICmpInst::Predicate Pred = ICI->getPredicate(); 309 Value *LHS = ICI->getOperand(0); 310 Value *RHS = ICI->getOperand(1); 311 312 switch (Pred) { 313 default: 314 return RANGE_CHECK_UNKNOWN; 315 316 case ICmpInst::ICMP_SLE: 317 std::swap(LHS, RHS); 318 LLVM_FALLTHROUGH; 319 case ICmpInst::ICMP_SGE: 320 IsSigned = true; 321 if (match(RHS, m_ConstantInt<0>())) { 322 Index = LHS; 323 return RANGE_CHECK_LOWER; 324 } 325 return RANGE_CHECK_UNKNOWN; 326 327 case ICmpInst::ICMP_SLT: 328 std::swap(LHS, RHS); 329 LLVM_FALLTHROUGH; 330 case ICmpInst::ICMP_SGT: 331 IsSigned = true; 332 if (match(RHS, m_ConstantInt<-1>())) { 333 Index = LHS; 334 return RANGE_CHECK_LOWER; 335 } 336 337 if (IsNonNegativeAndNotLoopVarying(LHS)) { 338 Index = RHS; 339 Length = LHS; 340 return RANGE_CHECK_UPPER; 341 } 342 return RANGE_CHECK_UNKNOWN; 343 344 case ICmpInst::ICMP_ULT: 345 std::swap(LHS, RHS); 346 LLVM_FALLTHROUGH; 347 case ICmpInst::ICMP_UGT: 348 IsSigned = false; 349 if (IsNonNegativeAndNotLoopVarying(LHS)) { 350 Index = RHS; 351 Length = LHS; 352 return RANGE_CHECK_BOTH; 353 } 354 return RANGE_CHECK_UNKNOWN; 355 } 356 357 llvm_unreachable("default clause returns!"); 358 } 359 360 void InductiveRangeCheck::extractRangeChecksFromCond( 361 Loop *L, ScalarEvolution &SE, Use &ConditionUse, 362 SmallVectorImpl<InductiveRangeCheck> &Checks, 363 SmallPtrSetImpl<Value *> &Visited) { 364 Value *Condition = ConditionUse.get(); 365 if (!Visited.insert(Condition).second) 366 return; 367 368 // TODO: Do the same for OR, XOR, NOT etc? 369 if (match(Condition, m_And(m_Value(), m_Value()))) { 370 extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0), 371 Checks, Visited); 372 extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1), 373 Checks, Visited); 374 return; 375 } 376 377 ICmpInst *ICI = dyn_cast<ICmpInst>(Condition); 378 if (!ICI) 379 return; 380 381 Value *Length = nullptr, *Index; 382 bool IsSigned; 383 auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length, IsSigned); 384 if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN) 385 return; 386 387 const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index)); 388 bool IsAffineIndex = 389 IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine(); 390 391 if (!IsAffineIndex) 392 return; 393 394 const SCEV *End = nullptr; 395 // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L". 396 // We can potentially do much better here. 397 if (Length) 398 End = SE.getSCEV(Length); 399 else { 400 assert(RCKind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!"); 401 // So far we can only reach this point for Signed range check. This may 402 // change in future. In this case we will need to pick Unsigned max for the 403 // unsigned range check. 
404 unsigned BitWidth = cast<IntegerType>(IndexAddRec->getType())->getBitWidth(); 405 const SCEV *SIntMax = SE.getConstant(APInt::getSignedMaxValue(BitWidth)); 406 End = SIntMax; 407 } 408 409 InductiveRangeCheck IRC; 410 IRC.End = End; 411 IRC.Begin = IndexAddRec->getStart(); 412 IRC.Step = IndexAddRec->getStepRecurrence(SE); 413 IRC.CheckUse = &ConditionUse; 414 IRC.Kind = RCKind; 415 IRC.IsSigned = IsSigned; 416 Checks.push_back(IRC); 417 } 418 419 void InductiveRangeCheck::extractRangeChecksFromBranch( 420 BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI, 421 SmallVectorImpl<InductiveRangeCheck> &Checks) { 422 if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch()) 423 return; 424 425 BranchProbability LikelyTaken(15, 16); 426 427 if (!SkipProfitabilityChecks && 428 BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken) 429 return; 430 431 SmallPtrSet<Value *, 8> Visited; 432 InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0), 433 Checks, Visited); 434 } 435 436 // Add metadata to the loop L to disable loop optimizations. Callers need to 437 // confirm that optimizing loop L is not beneficial. 438 static void DisableAllLoopOptsOnLoop(Loop &L) { 439 // We do not care about any existing loopID related metadata for L, since we 440 // are setting all loop metadata to false. 441 LLVMContext &Context = L.getHeader()->getContext(); 442 // Reserve first location for self reference to the LoopID metadata node. 443 MDNode *Dummy = MDNode::get(Context, {}); 444 MDNode *DisableUnroll = MDNode::get( 445 Context, {MDString::get(Context, "llvm.loop.unroll.disable")}); 446 Metadata *FalseVal = 447 ConstantAsMetadata::get(ConstantInt::get(Type::getInt1Ty(Context), 0)); 448 MDNode *DisableVectorize = MDNode::get( 449 Context, 450 {MDString::get(Context, "llvm.loop.vectorize.enable"), FalseVal}); 451 MDNode *DisableLICMVersioning = MDNode::get( 452 Context, {MDString::get(Context, "llvm.loop.licm_versioning.disable")}); 453 MDNode *DisableDistribution= MDNode::get( 454 Context, 455 {MDString::get(Context, "llvm.loop.distribute.enable"), FalseVal}); 456 MDNode *NewLoopID = 457 MDNode::get(Context, {Dummy, DisableUnroll, DisableVectorize, 458 DisableLICMVersioning, DisableDistribution}); 459 // Set operand 0 to refer to the loop id itself. 460 NewLoopID->replaceOperandWith(0, NewLoopID); 461 L.setLoopID(NewLoopID); 462 } 463 464 namespace { 465 466 // Keeps track of the structure of a loop. This is similar to llvm::Loop, 467 // except that it is more lightweight and can track the state of a loop through 468 // changing and potentially invalid IR. This structure also formalizes the 469 // kinds of loops we can deal with -- ones that have a single latch that is also 470 // an exiting block *and* have a canonical induction variable. 471 struct LoopStructure { 472 const char *Tag = ""; 473 474 BasicBlock *Header = nullptr; 475 BasicBlock *Latch = nullptr; 476 477 // `Latch's terminator instruction is `LatchBr', and it's `LatchBrExitIdx'th 478 // successor is `LatchExit', the exit block of the loop. 479 BranchInst *LatchBr = nullptr; 480 BasicBlock *LatchExit = nullptr; 481 unsigned LatchBrExitIdx = std::numeric_limits<unsigned>::max(); 482 483 // The loop represented by this instance of LoopStructure is semantically 484 // equivalent to: 485 // 486 // intN_ty inc = IndVarIncreasing ? 1 : -1; 487 // pred_ty predicate = IndVarIncreasing ? 
ICMP_SLT : ICMP_SGT; 488 // 489 // for (intN_ty iv = IndVarStart; predicate(iv, LoopExitAt); iv = IndVarBase) 490 // ... body ... 491 492 Value *IndVarBase = nullptr; 493 Value *IndVarStart = nullptr; 494 Value *IndVarStep = nullptr; 495 Value *LoopExitAt = nullptr; 496 bool IndVarIncreasing = false; 497 bool IsSignedPredicate = true; 498 499 LoopStructure() = default; 500 501 template <typename M> LoopStructure map(M Map) const { 502 LoopStructure Result; 503 Result.Tag = Tag; 504 Result.Header = cast<BasicBlock>(Map(Header)); 505 Result.Latch = cast<BasicBlock>(Map(Latch)); 506 Result.LatchBr = cast<BranchInst>(Map(LatchBr)); 507 Result.LatchExit = cast<BasicBlock>(Map(LatchExit)); 508 Result.LatchBrExitIdx = LatchBrExitIdx; 509 Result.IndVarBase = Map(IndVarBase); 510 Result.IndVarStart = Map(IndVarStart); 511 Result.IndVarStep = Map(IndVarStep); 512 Result.LoopExitAt = Map(LoopExitAt); 513 Result.IndVarIncreasing = IndVarIncreasing; 514 Result.IsSignedPredicate = IsSignedPredicate; 515 return Result; 516 } 517 518 static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &, 519 BranchProbabilityInfo &BPI, 520 Loop &, 521 const char *&); 522 }; 523 524 /// This class is used to constrain loops to run within a given iteration space. 525 /// The algorithm this class implements is given a Loop and a range [Begin, 526 /// End). The algorithm then tries to break out a "main loop" out of the loop 527 /// it is given in a way that the "main loop" runs with the induction variable 528 /// in a subset of [Begin, End). The algorithm emits appropriate pre and post 529 /// loops to run any remaining iterations. The pre loop runs any iterations in 530 /// which the induction variable is < Begin, and the post loop runs any 531 /// iterations in which the induction variable is >= End. 532 class LoopConstrainer { 533 // The representation of a clone of the original loop we started out with. 534 struct ClonedLoop { 535 // The cloned blocks 536 std::vector<BasicBlock *> Blocks; 537 538 // `Map` maps values in the clonee into values in the cloned version 539 ValueToValueMapTy Map; 540 541 // An instance of `LoopStructure` for the cloned loop 542 LoopStructure Structure; 543 }; 544 545 // Result of rewriting the range of a loop. See changeIterationSpaceEnd for 546 // more details on what these fields mean. 547 struct RewrittenRangeInfo { 548 BasicBlock *PseudoExit = nullptr; 549 BasicBlock *ExitSelector = nullptr; 550 std::vector<PHINode *> PHIValuesAtPseudoExit; 551 PHINode *IndVarEnd = nullptr; 552 553 RewrittenRangeInfo() = default; 554 }; 555 556 // Calculated subranges we restrict the iteration space of the main loop to. 557 // See the implementation of `calculateSubRanges' for more details on how 558 // these fields are computed. `LowLimit` is None if there is no restriction 559 // on low end of the restricted iteration space of the main loop. `HighLimit` 560 // is None if there is no restriction on high end of the restricted iteration 561 // space of the main loop. 562 563 struct SubRanges { 564 Optional<const SCEV *> LowLimit; 565 Optional<const SCEV *> HighLimit; 566 }; 567 568 // A utility function that does a `replaceUsesOfWith' on the incoming block 569 // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's 570 // incoming block list with `ReplaceBy'. 
571 static void replacePHIBlock(PHINode *PN, BasicBlock *Block, 572 BasicBlock *ReplaceBy); 573 574 // Compute a safe set of limits for the main loop to run in -- effectively the 575 // intersection of `Range' and the iteration space of the original loop. 576 // Return None if unable to compute the set of subranges. 577 Optional<SubRanges> calculateSubRanges(bool IsSignedPredicate) const; 578 579 // Clone `OriginalLoop' and return the result in CLResult. The IR after 580 // running `cloneLoop' is well formed except for the PHI nodes in CLResult -- 581 // the PHI nodes say that there is an incoming edge from `OriginalPreheader` 582 // but there is no such edge. 583 void cloneLoop(ClonedLoop &CLResult, const char *Tag) const; 584 585 // Create the appropriate loop structure needed to describe a cloned copy of 586 // `Original`. The clone is described by `VM`. 587 Loop *createClonedLoopStructure(Loop *Original, Loop *Parent, 588 ValueToValueMapTy &VM); 589 590 // Rewrite the iteration space of the loop denoted by (LS, Preheader). The 591 // iteration space of the rewritten loop ends at ExitLoopAt. The start of the 592 // iteration space is not changed. `ExitLoopAt' is assumed to be slt 593 // `OriginalHeaderCount'. 594 // 595 // If there are iterations left to execute, control is made to jump to 596 // `ContinuationBlock', otherwise they take the normal loop exit. The 597 // returned `RewrittenRangeInfo' object is populated as follows: 598 // 599 // .PseudoExit is a basic block that unconditionally branches to 600 // `ContinuationBlock'. 601 // 602 // .ExitSelector is a basic block that decides, on exit from the loop, 603 // whether to branch to the "true" exit or to `PseudoExit'. 604 // 605 // .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value 606 // for each PHINode in the loop header on taking the pseudo exit. 607 // 608 // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate 609 // preheader because it is made to branch to the loop header only 610 // conditionally. 611 RewrittenRangeInfo 612 changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader, 613 Value *ExitLoopAt, 614 BasicBlock *ContinuationBlock) const; 615 616 // The loop denoted by `LS' has `OldPreheader' as its preheader. This 617 // function creates a new preheader for `LS' and returns it. 618 BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader, 619 const char *Tag) const; 620 621 // `ContinuationBlockAndPreheader' was the continuation block for some call to 622 // `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'. 623 // This function rewrites the PHI nodes in `LS.Header' to start with the 624 // correct value. 625 void rewriteIncomingValuesForPHIs( 626 LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader, 627 const LoopConstrainer::RewrittenRangeInfo &RRI) const; 628 629 // Even though we do not preserve any passes at this time, we at least need to 630 // keep the parent loop structure consistent. The `LPPassManager' seems to 631 // verify this after running a loop pass. This function adds the list of 632 // blocks denoted by BBs to this loops parent loop if required. 633 void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs); 634 635 // Some global state. 636 Function &F; 637 LLVMContext &Ctx; 638 ScalarEvolution &SE; 639 DominatorTree &DT; 640 LPPassManager &LPM; 641 LoopInfo &LI; 642 643 // Information about the original loop we started out with. 
644 Loop &OriginalLoop; 645 646 const SCEV *LatchTakenCount = nullptr; 647 BasicBlock *OriginalPreheader = nullptr; 648 649 // The preheader of the main loop. This may or may not be different from 650 // `OriginalPreheader'. 651 BasicBlock *MainLoopPreheader = nullptr; 652 653 // The range we need to run the main loop in. 654 InductiveRangeCheck::Range Range; 655 656 // The structure of the main loop (see comment at the beginning of this class 657 // for a definition) 658 LoopStructure MainLoopStructure; 659 660 public: 661 LoopConstrainer(Loop &L, LoopInfo &LI, LPPassManager &LPM, 662 const LoopStructure &LS, ScalarEvolution &SE, 663 DominatorTree &DT, InductiveRangeCheck::Range R) 664 : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()), 665 SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L), Range(R), 666 MainLoopStructure(LS) {} 667 668 // Entry point for the algorithm. Returns true on success. 669 bool run(); 670 }; 671 672 } // end anonymous namespace 673 674 void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block, 675 BasicBlock *ReplaceBy) { 676 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 677 if (PN->getIncomingBlock(i) == Block) 678 PN->setIncomingBlock(i, ReplaceBy); 679 } 680 681 static bool CanBeMax(ScalarEvolution &SE, const SCEV *S, bool Signed) { 682 APInt Max = Signed ? 683 APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth()) : 684 APInt::getMaxValue(cast<IntegerType>(S->getType())->getBitWidth()); 685 return SE.getSignedRange(S).contains(Max) && 686 SE.getUnsignedRange(S).contains(Max); 687 } 688 689 static bool SumCanReachMax(ScalarEvolution &SE, const SCEV *S1, const SCEV *S2, 690 bool Signed) { 691 // S1 < INT_MAX - S2 ===> S1 + S2 < INT_MAX. 692 assert(SE.isKnownNonNegative(S2) && 693 "We expected the 2nd arg to be non-negative!"); 694 const SCEV *Max = SE.getConstant( 695 Signed ? APInt::getSignedMaxValue( 696 cast<IntegerType>(S1->getType())->getBitWidth()) 697 : APInt::getMaxValue( 698 cast<IntegerType>(S1->getType())->getBitWidth())); 699 const SCEV *CapForS1 = SE.getMinusSCEV(Max, S2); 700 return !SE.isKnownPredicate(Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, 701 S1, CapForS1); 702 } 703 704 static bool CanBeMin(ScalarEvolution &SE, const SCEV *S, bool Signed) { 705 APInt Min = Signed ? 706 APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth()) : 707 APInt::getMinValue(cast<IntegerType>(S->getType())->getBitWidth()); 708 return SE.getSignedRange(S).contains(Min) && 709 SE.getUnsignedRange(S).contains(Min); 710 } 711 712 static bool SumCanReachMin(ScalarEvolution &SE, const SCEV *S1, const SCEV *S2, 713 bool Signed) { 714 // S1 > INT_MIN - S2 ===> S1 + S2 > INT_MIN. 715 assert(SE.isKnownNonPositive(S2) && 716 "We expected the 2nd arg to be non-positive!"); 717 const SCEV *Max = SE.getConstant( 718 Signed ? APInt::getSignedMinValue( 719 cast<IntegerType>(S1->getType())->getBitWidth()) 720 : APInt::getMinValue( 721 cast<IntegerType>(S1->getType())->getBitWidth())); 722 const SCEV *CapForS1 = SE.getMinusSCEV(Max, S2); 723 return !SE.isKnownPredicate(Signed ? 
ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, 724 S1, CapForS1); 725 } 726 727 Optional<LoopStructure> 728 LoopStructure::parseLoopStructure(ScalarEvolution &SE, 729 BranchProbabilityInfo &BPI, 730 Loop &L, const char *&FailureReason) { 731 if (!L.isLoopSimplifyForm()) { 732 FailureReason = "loop not in LoopSimplify form"; 733 return None; 734 } 735 736 BasicBlock *Latch = L.getLoopLatch(); 737 assert(Latch && "Simplified loops only have one latch!"); 738 739 if (Latch->getTerminator()->getMetadata(ClonedLoopTag)) { 740 FailureReason = "loop has already been cloned"; 741 return None; 742 } 743 744 if (!L.isLoopExiting(Latch)) { 745 FailureReason = "no loop latch"; 746 return None; 747 } 748 749 BasicBlock *Header = L.getHeader(); 750 BasicBlock *Preheader = L.getLoopPreheader(); 751 if (!Preheader) { 752 FailureReason = "no preheader"; 753 return None; 754 } 755 756 BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator()); 757 if (!LatchBr || LatchBr->isUnconditional()) { 758 FailureReason = "latch terminator not conditional branch"; 759 return None; 760 } 761 762 unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0; 763 764 BranchProbability ExitProbability = 765 BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx); 766 767 if (!SkipProfitabilityChecks && 768 ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) { 769 FailureReason = "short running loop, not profitable"; 770 return None; 771 } 772 773 ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition()); 774 if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) { 775 FailureReason = "latch terminator branch not conditional on integral icmp"; 776 return None; 777 } 778 779 const SCEV *LatchCount = SE.getExitCount(&L, Latch); 780 if (isa<SCEVCouldNotCompute>(LatchCount)) { 781 FailureReason = "could not compute latch count"; 782 return None; 783 } 784 785 ICmpInst::Predicate Pred = ICI->getPredicate(); 786 Value *LeftValue = ICI->getOperand(0); 787 const SCEV *LeftSCEV = SE.getSCEV(LeftValue); 788 IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType()); 789 790 Value *RightValue = ICI->getOperand(1); 791 const SCEV *RightSCEV = SE.getSCEV(RightValue); 792 793 // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence. 794 if (!isa<SCEVAddRecExpr>(LeftSCEV)) { 795 if (isa<SCEVAddRecExpr>(RightSCEV)) { 796 std::swap(LeftSCEV, RightSCEV); 797 std::swap(LeftValue, RightValue); 798 Pred = ICmpInst::getSwappedPredicate(Pred); 799 } else { 800 FailureReason = "no add recurrences in the icmp"; 801 return None; 802 } 803 } 804 805 auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) { 806 if (AR->getNoWrapFlags(SCEV::FlagNSW)) 807 return true; 808 809 IntegerType *Ty = cast<IntegerType>(AR->getType()); 810 IntegerType *WideTy = 811 IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2); 812 813 const SCEVAddRecExpr *ExtendAfterOp = 814 dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy)); 815 if (ExtendAfterOp) { 816 const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy); 817 const SCEV *ExtendedStep = 818 SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy); 819 820 bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart && 821 ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep; 822 823 if (NoSignedWrap) 824 return true; 825 } 826 827 // We may have proved this when computing the sign extension above. 
828 return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap; 829 }; 830 831 // Here we check whether the suggested AddRec is an induction variable that 832 // can be handled (i.e. with known constant step), and if yes, calculate its 833 // step and identify whether it is increasing or decreasing. 834 auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing, 835 ConstantInt *&StepCI) { 836 if (!AR->isAffine()) 837 return false; 838 839 // Currently we only work with induction variables that have been proved to 840 // not wrap. This restriction can potentially be lifted in the future. 841 842 if (!HasNoSignedWrap(AR)) 843 return false; 844 845 if (const SCEVConstant *StepExpr = 846 dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) { 847 StepCI = StepExpr->getValue(); 848 assert(!StepCI->isZero() && "Zero step?"); 849 IsIncreasing = !StepCI->isNegative(); 850 return true; 851 } 852 853 return false; 854 }; 855 856 // `ICI` is interpreted as taking the backedge if the *next* value of the 857 // induction variable satisfies some constraint. 858 859 const SCEVAddRecExpr *IndVarBase = cast<SCEVAddRecExpr>(LeftSCEV); 860 bool IsIncreasing = false; 861 bool IsSignedPredicate = true; 862 ConstantInt *StepCI; 863 if (!IsInductionVar(IndVarBase, IsIncreasing, StepCI)) { 864 FailureReason = "LHS in icmp not induction variable"; 865 return None; 866 } 867 868 const SCEV *StartNext = IndVarBase->getStart(); 869 const SCEV *Addend = SE.getNegativeSCEV(IndVarBase->getStepRecurrence(SE)); 870 const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend); 871 const SCEV *Step = SE.getSCEV(StepCI); 872 873 ConstantInt *One = ConstantInt::get(IndVarTy, 1); 874 if (IsIncreasing) { 875 bool DecreasedRightValueByOne = false; 876 if (StepCI->isOne()) { 877 // Try to turn eq/ne predicates to those we can work with. 878 if (Pred == ICmpInst::ICMP_NE && LatchBrExitIdx == 1) 879 // while (++i != len) { while (++i < len) { 880 // ... ---> ... 881 // } } 882 // If both parts are known non-negative, it is profitable to use 883 // unsigned comparison in increasing loop. This allows us to make the 884 // comparison check against "RightSCEV + 1" more optimistic. 885 if (SE.isKnownNonNegative(IndVarStart) && 886 SE.isKnownNonNegative(RightSCEV)) 887 Pred = ICmpInst::ICMP_ULT; 888 else 889 Pred = ICmpInst::ICMP_SLT; 890 else if (Pred == ICmpInst::ICMP_EQ && LatchBrExitIdx == 0 && 891 !CanBeMin(SE, RightSCEV, /* IsSignedPredicate */ true)) { 892 // while (true) { while (true) { 893 // if (++i == len) ---> if (++i > len - 1) 894 // break; break; 895 // ... ... 896 // } } 897 // TODO: Insert ICMP_UGT if both are non-negative? 
898 Pred = ICmpInst::ICMP_SGT; 899 RightSCEV = SE.getMinusSCEV(RightSCEV, SE.getOne(RightSCEV->getType())); 900 DecreasedRightValueByOne = true; 901 } 902 } 903 904 bool LTPred = (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_ULT); 905 bool GTPred = (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_UGT); 906 bool FoundExpectedPred = 907 (LTPred && LatchBrExitIdx == 1) || (GTPred && LatchBrExitIdx == 0); 908 909 if (!FoundExpectedPred) { 910 FailureReason = "expected icmp slt semantically, found something else"; 911 return None; 912 } 913 914 IsSignedPredicate = 915 Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGT; 916 917 if (!IsSignedPredicate && !AllowUnsignedLatchCondition) { 918 FailureReason = "unsigned latch conditions are explicitly prohibited"; 919 return None; 920 } 921 922 // The predicate that we need to check that the induction variable lies 923 // within bounds. 924 ICmpInst::Predicate BoundPred = 925 IsSignedPredicate ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT; 926 927 if (LatchBrExitIdx == 0) { 928 const SCEV *StepMinusOne = SE.getMinusSCEV(Step, 929 SE.getOne(Step->getType())); 930 if (SumCanReachMax(SE, RightSCEV, StepMinusOne, IsSignedPredicate)) { 931 // TODO: this restriction is easily removable -- we just have to 932 // remember that the icmp was an slt and not an sle. 933 FailureReason = "limit may overflow when coercing le to lt"; 934 return None; 935 } 936 937 if (!SE.isAvailableAtLoopEntry(RightSCEV, &L) || 938 !SE.isLoopEntryGuardedByCond(&L, BoundPred, IndVarStart, 939 SE.getAddExpr(RightSCEV, Step))) { 940 FailureReason = "Induction variable start not bounded by upper limit"; 941 return None; 942 } 943 944 // We need to increase the right value unless we have already decreased 945 // it virtually when we replaced EQ with SGT. 946 if (!DecreasedRightValueByOne) { 947 IRBuilder<> B(Preheader->getTerminator()); 948 RightValue = B.CreateAdd(RightValue, One); 949 } 950 } else { 951 if (!SE.isAvailableAtLoopEntry(RightSCEV, &L) || 952 !SE.isLoopEntryGuardedByCond(&L, BoundPred, IndVarStart, RightSCEV)) { 953 FailureReason = "Induction variable start not bounded by upper limit"; 954 return None; 955 } 956 assert(!DecreasedRightValueByOne && 957 "Right value can be decreased only for LatchBrExitIdx == 0!"); 958 } 959 } else { 960 bool IncreasedRightValueByOne = false; 961 if (StepCI->isMinusOne()) { 962 // Try to turn eq/ne predicates to those we can work with. 963 if (Pred == ICmpInst::ICMP_NE && LatchBrExitIdx == 1) 964 // while (--i != len) { while (--i > len) { 965 // ... ---> ... 966 // } } 967 // We intentionally don't turn the predicate into UGT even if we know 968 // that both operands are non-negative, because it will only pessimize 969 // our check against "RightSCEV - 1". 970 Pred = ICmpInst::ICMP_SGT; 971 else if (Pred == ICmpInst::ICMP_EQ && LatchBrExitIdx == 0 && 972 !CanBeMax(SE, RightSCEV, /* IsSignedPredicate */ true)) { 973 // while (true) { while (true) { 974 // if (--i == len) ---> if (--i < len + 1) 975 // break; break; 976 // ... ... 977 // } } 978 // TODO: Insert ICMP_ULT if both are non-negative? 
979 Pred = ICmpInst::ICMP_SLT; 980 RightSCEV = SE.getAddExpr(RightSCEV, SE.getOne(RightSCEV->getType())); 981 IncreasedRightValueByOne = true; 982 } 983 } 984 985 bool LTPred = (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_ULT); 986 bool GTPred = (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_UGT); 987 988 bool FoundExpectedPred = 989 (GTPred && LatchBrExitIdx == 1) || (LTPred && LatchBrExitIdx == 0); 990 991 if (!FoundExpectedPred) { 992 FailureReason = "expected icmp sgt semantically, found something else"; 993 return None; 994 } 995 996 IsSignedPredicate = 997 Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGT; 998 999 if (!IsSignedPredicate && !AllowUnsignedLatchCondition) { 1000 FailureReason = "unsigned latch conditions are explicitly prohibited"; 1001 return None; 1002 } 1003 1004 // The predicate that we need to check that the induction variable lies 1005 // within bounds. 1006 ICmpInst::Predicate BoundPred = 1007 IsSignedPredicate ? CmpInst::ICMP_SGT : CmpInst::ICMP_UGT; 1008 1009 if (LatchBrExitIdx == 0) { 1010 const SCEV *StepPlusOne = SE.getAddExpr(Step, SE.getOne(Step->getType())); 1011 if (SumCanReachMin(SE, RightSCEV, StepPlusOne, IsSignedPredicate)) { 1012 // TODO: this restriction is easily removable -- we just have to 1013 // remember that the icmp was an sgt and not an sge. 1014 FailureReason = "limit may overflow when coercing ge to gt"; 1015 return None; 1016 } 1017 1018 if (!SE.isAvailableAtLoopEntry(RightSCEV, &L) || 1019 !SE.isLoopEntryGuardedByCond( 1020 &L, BoundPred, IndVarStart, 1021 SE.getMinusSCEV(RightSCEV, SE.getOne(RightSCEV->getType())))) { 1022 FailureReason = "Induction variable start not bounded by lower limit"; 1023 return None; 1024 } 1025 1026 // We need to decrease the right value unless we have already increased 1027 // it virtually when we replaced EQ with SLT. 
1028 if (!IncreasedRightValueByOne) { 1029 IRBuilder<> B(Preheader->getTerminator()); 1030 RightValue = B.CreateSub(RightValue, One); 1031 } 1032 } else { 1033 if (!SE.isAvailableAtLoopEntry(RightSCEV, &L) || 1034 !SE.isLoopEntryGuardedByCond(&L, BoundPred, IndVarStart, RightSCEV)) { 1035 FailureReason = "Induction variable start not bounded by lower limit"; 1036 return None; 1037 } 1038 assert(!IncreasedRightValueByOne && 1039 "Right value can be increased only for LatchBrExitIdx == 0!"); 1040 } 1041 } 1042 BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx); 1043 1044 assert(SE.getLoopDisposition(LatchCount, &L) == 1045 ScalarEvolution::LoopInvariant && 1046 "loop variant exit count doesn't make sense!"); 1047 1048 assert(!L.contains(LatchExit) && "expected an exit block!"); 1049 const DataLayout &DL = Preheader->getModule()->getDataLayout(); 1050 Value *IndVarStartV = 1051 SCEVExpander(SE, DL, "irce") 1052 .expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator()); 1053 IndVarStartV->setName("indvar.start"); 1054 1055 LoopStructure Result; 1056 1057 Result.Tag = "main"; 1058 Result.Header = Header; 1059 Result.Latch = Latch; 1060 Result.LatchBr = LatchBr; 1061 Result.LatchExit = LatchExit; 1062 Result.LatchBrExitIdx = LatchBrExitIdx; 1063 Result.IndVarStart = IndVarStartV; 1064 Result.IndVarStep = StepCI; 1065 Result.IndVarBase = LeftValue; 1066 Result.IndVarIncreasing = IsIncreasing; 1067 Result.LoopExitAt = RightValue; 1068 Result.IsSignedPredicate = IsSignedPredicate; 1069 1070 FailureReason = nullptr; 1071 1072 return Result; 1073 } 1074 1075 Optional<LoopConstrainer::SubRanges> 1076 LoopConstrainer::calculateSubRanges(bool IsSignedPredicate) const { 1077 IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType()); 1078 1079 if (Range.getType() != Ty) 1080 return None; 1081 1082 LoopConstrainer::SubRanges Result; 1083 1084 // I think we can be more aggressive here and make this nuw / nsw if the 1085 // addition that feeds into the icmp for the latch's terminating branch is nuw 1086 // / nsw. In any case, a wrapping 2's complement addition is safe. 1087 const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart); 1088 const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt); 1089 1090 bool Increasing = MainLoopStructure.IndVarIncreasing; 1091 1092 // We compute `Smallest` and `Greatest` such that [Smallest, Greatest), or 1093 // [Smallest, GreatestSeen] is the range of values the induction variable 1094 // takes. 1095 1096 const SCEV *Smallest = nullptr, *Greatest = nullptr, *GreatestSeen = nullptr; 1097 1098 const SCEV *One = SE.getOne(Ty); 1099 if (Increasing) { 1100 Smallest = Start; 1101 Greatest = End; 1102 // No overflow, because the range [Smallest, GreatestSeen] is not empty. 1103 GreatestSeen = SE.getMinusSCEV(End, One); 1104 } else { 1105 // These two computations may sign-overflow. Here is why that is okay: 1106 // 1107 // We know that the induction variable does not sign-overflow on any 1108 // iteration except the last one, and it starts at `Start` and ends at 1109 // `End`, decrementing by one every time. 1110 // 1111 // * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the 1112 // induction variable is decreasing we know that that the smallest value 1113 // the loop body is actually executed with is `INT_SMIN` == `Smallest`. 1114 // 1115 // * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. 
In 1116 // that case, `Clamp` will always return `Smallest` and 1117 // [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`) 1118 // will be an empty range. Returning an empty range is always safe. 1119 1120 Smallest = SE.getAddExpr(End, One); 1121 Greatest = SE.getAddExpr(Start, One); 1122 GreatestSeen = Start; 1123 } 1124 1125 auto Clamp = [this, Smallest, Greatest, IsSignedPredicate](const SCEV *S) { 1126 return IsSignedPredicate 1127 ? SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S)) 1128 : SE.getUMaxExpr(Smallest, SE.getUMinExpr(Greatest, S)); 1129 }; 1130 1131 // In some cases we can prove that we don't need a pre or post loop. 1132 ICmpInst::Predicate PredLE = 1133 IsSignedPredicate ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 1134 ICmpInst::Predicate PredLT = 1135 IsSignedPredicate ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; 1136 1137 bool ProvablyNoPreloop = 1138 SE.isKnownPredicate(PredLE, Range.getBegin(), Smallest); 1139 if (!ProvablyNoPreloop) 1140 Result.LowLimit = Clamp(Range.getBegin()); 1141 1142 bool ProvablyNoPostLoop = 1143 SE.isKnownPredicate(PredLT, GreatestSeen, Range.getEnd()); 1144 if (!ProvablyNoPostLoop) 1145 Result.HighLimit = Clamp(Range.getEnd()); 1146 1147 return Result; 1148 } 1149 1150 void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result, 1151 const char *Tag) const { 1152 for (BasicBlock *BB : OriginalLoop.getBlocks()) { 1153 BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F); 1154 Result.Blocks.push_back(Clone); 1155 Result.Map[BB] = Clone; 1156 } 1157 1158 auto GetClonedValue = [&Result](Value *V) { 1159 assert(V && "null values not in domain!"); 1160 auto It = Result.Map.find(V); 1161 if (It == Result.Map.end()) 1162 return V; 1163 return static_cast<Value *>(It->second); 1164 }; 1165 1166 auto *ClonedLatch = 1167 cast<BasicBlock>(GetClonedValue(OriginalLoop.getLoopLatch())); 1168 ClonedLatch->getTerminator()->setMetadata(ClonedLoopTag, 1169 MDNode::get(Ctx, {})); 1170 1171 Result.Structure = MainLoopStructure.map(GetClonedValue); 1172 Result.Structure.Tag = Tag; 1173 1174 for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) { 1175 BasicBlock *ClonedBB = Result.Blocks[i]; 1176 BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i]; 1177 1178 assert(Result.Map[OriginalBB] == ClonedBB && "invariant!"); 1179 1180 for (Instruction &I : *ClonedBB) 1181 RemapInstruction(&I, Result.Map, 1182 RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); 1183 1184 // Exit blocks will now have one more predecessor and their PHI nodes need 1185 // to be edited to reflect that. No phi nodes need to be introduced because 1186 // the loop is in LCSSA. 1187 1188 for (auto *SBB : successors(OriginalBB)) { 1189 if (OriginalLoop.contains(SBB)) 1190 continue; // not an exit block 1191 1192 for (PHINode &PN : SBB->phis()) { 1193 Value *OldIncoming = PN.getIncomingValueForBlock(OriginalBB); 1194 PN.addIncoming(GetClonedValue(OldIncoming), ClonedBB); 1195 } 1196 } 1197 } 1198 } 1199 1200 LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd( 1201 const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt, 1202 BasicBlock *ContinuationBlock) const { 1203 // We start with a loop with a single latch: 1204 // 1205 // +--------------------+ 1206 // | | 1207 // | preheader | 1208 // | | 1209 // +--------+-----------+ 1210 // | ----------------\ 1211 // | / | 1212 // +--------v----v------+ | 1213 // | | | 1214 // | header | | 1215 // | | | 1216 // +--------------------+ | 1217 // | 1218 // ..... 
| 1219 // | 1220 // +--------------------+ | 1221 // | | | 1222 // | latch >----------/ 1223 // | | 1224 // +-------v------------+ 1225 // | 1226 // | 1227 // | +--------------------+ 1228 // | | | 1229 // +---> original exit | 1230 // | | 1231 // +--------------------+ 1232 // 1233 // We change the control flow to look like 1234 // 1235 // 1236 // +--------------------+ 1237 // | | 1238 // | preheader >-------------------------+ 1239 // | | | 1240 // +--------v-----------+ | 1241 // | /-------------+ | 1242 // | / | | 1243 // +--------v--v--------+ | | 1244 // | | | | 1245 // | header | | +--------+ | 1246 // | | | | | | 1247 // +--------------------+ | | +-----v-----v-----------+ 1248 // | | | | 1249 // | | | .pseudo.exit | 1250 // | | | | 1251 // | | +-----------v-----------+ 1252 // | | | 1253 // ..... | | | 1254 // | | +--------v-------------+ 1255 // +--------------------+ | | | | 1256 // | | | | | ContinuationBlock | 1257 // | latch >------+ | | | 1258 // | | | +----------------------+ 1259 // +---------v----------+ | 1260 // | | 1261 // | | 1262 // | +---------------^-----+ 1263 // | | | 1264 // +-----> .exit.selector | 1265 // | | 1266 // +----------v----------+ 1267 // | 1268 // +--------------------+ | 1269 // | | | 1270 // | original exit <----+ 1271 // | | 1272 // +--------------------+ 1273 1274 RewrittenRangeInfo RRI; 1275 1276 BasicBlock *BBInsertLocation = LS.Latch->getNextNode(); 1277 RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector", 1278 &F, BBInsertLocation); 1279 RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F, 1280 BBInsertLocation); 1281 1282 BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator()); 1283 bool Increasing = LS.IndVarIncreasing; 1284 bool IsSignedPredicate = LS.IsSignedPredicate; 1285 1286 IRBuilder<> B(PreheaderJump); 1287 1288 // EnterLoopCond - is it okay to start executing this `LS'? 1289 Value *EnterLoopCond = nullptr; 1290 if (Increasing) 1291 EnterLoopCond = IsSignedPredicate 1292 ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt) 1293 : B.CreateICmpULT(LS.IndVarStart, ExitSubloopAt); 1294 else 1295 EnterLoopCond = IsSignedPredicate 1296 ? B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt) 1297 : B.CreateICmpUGT(LS.IndVarStart, ExitSubloopAt); 1298 1299 B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit); 1300 PreheaderJump->eraseFromParent(); 1301 1302 LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector); 1303 B.SetInsertPoint(LS.LatchBr); 1304 Value *TakeBackedgeLoopCond = nullptr; 1305 if (Increasing) 1306 TakeBackedgeLoopCond = IsSignedPredicate 1307 ? B.CreateICmpSLT(LS.IndVarBase, ExitSubloopAt) 1308 : B.CreateICmpULT(LS.IndVarBase, ExitSubloopAt); 1309 else 1310 TakeBackedgeLoopCond = IsSignedPredicate 1311 ? B.CreateICmpSGT(LS.IndVarBase, ExitSubloopAt) 1312 : B.CreateICmpUGT(LS.IndVarBase, ExitSubloopAt); 1313 Value *CondForBranch = LS.LatchBrExitIdx == 1 1314 ? TakeBackedgeLoopCond 1315 : B.CreateNot(TakeBackedgeLoopCond); 1316 1317 LS.LatchBr->setCondition(CondForBranch); 1318 1319 B.SetInsertPoint(RRI.ExitSelector); 1320 1321 // IterationsLeft - are there any more iterations left, given the original 1322 // upper bound on the induction variable? If not, we branch to the "real" 1323 // exit. 1324 Value *IterationsLeft = nullptr; 1325 if (Increasing) 1326 IterationsLeft = IsSignedPredicate 1327 ? B.CreateICmpSLT(LS.IndVarBase, LS.LoopExitAt) 1328 : B.CreateICmpULT(LS.IndVarBase, LS.LoopExitAt); 1329 else 1330 IterationsLeft = IsSignedPredicate 1331 ? 
B.CreateICmpSGT(LS.IndVarBase, LS.LoopExitAt) 1332 : B.CreateICmpUGT(LS.IndVarBase, LS.LoopExitAt); 1333 B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit); 1334 1335 BranchInst *BranchToContinuation = 1336 BranchInst::Create(ContinuationBlock, RRI.PseudoExit); 1337 1338 // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of 1339 // each of the PHI nodes in the loop header. This feeds into the initial 1340 // value of the same PHI nodes if/when we continue execution. 1341 for (PHINode &PN : LS.Header->phis()) { 1342 PHINode *NewPHI = PHINode::Create(PN.getType(), 2, PN.getName() + ".copy", 1343 BranchToContinuation); 1344 1345 NewPHI->addIncoming(PN.getIncomingValueForBlock(Preheader), Preheader); 1346 NewPHI->addIncoming(PN.getIncomingValueForBlock(LS.Latch), 1347 RRI.ExitSelector); 1348 RRI.PHIValuesAtPseudoExit.push_back(NewPHI); 1349 } 1350 1351 RRI.IndVarEnd = PHINode::Create(LS.IndVarBase->getType(), 2, "indvar.end", 1352 BranchToContinuation); 1353 RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader); 1354 RRI.IndVarEnd->addIncoming(LS.IndVarBase, RRI.ExitSelector); 1355 1356 // The latch exit now has a branch from `RRI.ExitSelector' instead of 1357 // `LS.Latch'. The PHI nodes need to be updated to reflect that. 1358 for (PHINode &PN : LS.LatchExit->phis()) 1359 replacePHIBlock(&PN, LS.Latch, RRI.ExitSelector); 1360 1361 return RRI; 1362 } 1363 1364 void LoopConstrainer::rewriteIncomingValuesForPHIs( 1365 LoopStructure &LS, BasicBlock *ContinuationBlock, 1366 const LoopConstrainer::RewrittenRangeInfo &RRI) const { 1367 unsigned PHIIndex = 0; 1368 for (PHINode &PN : LS.Header->phis()) 1369 for (unsigned i = 0, e = PN.getNumIncomingValues(); i < e; ++i) 1370 if (PN.getIncomingBlock(i) == ContinuationBlock) 1371 PN.setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]); 1372 1373 LS.IndVarStart = RRI.IndVarEnd; 1374 } 1375 1376 BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS, 1377 BasicBlock *OldPreheader, 1378 const char *Tag) const { 1379 BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header); 1380 BranchInst::Create(LS.Header, Preheader); 1381 1382 for (PHINode &PN : LS.Header->phis()) 1383 for (unsigned i = 0, e = PN.getNumIncomingValues(); i < e; ++i) 1384 replacePHIBlock(&PN, OldPreheader, Preheader); 1385 1386 return Preheader; 1387 } 1388 1389 void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) { 1390 Loop *ParentLoop = OriginalLoop.getParentLoop(); 1391 if (!ParentLoop) 1392 return; 1393 1394 for (BasicBlock *BB : BBs) 1395 ParentLoop->addBasicBlockToLoop(BB, LI); 1396 } 1397 1398 Loop *LoopConstrainer::createClonedLoopStructure(Loop *Original, Loop *Parent, 1399 ValueToValueMapTy &VM) { 1400 Loop &New = *LI.AllocateLoop(); 1401 if (Parent) 1402 Parent->addChildLoop(&New); 1403 else 1404 LI.addTopLevelLoop(&New); 1405 LPM.addLoop(New); 1406 1407 // Add all of the blocks in Original to the new loop. 1408 for (auto *BB : Original->blocks()) 1409 if (LI.getLoopFor(BB) == Original) 1410 New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), LI); 1411 1412 // Add all of the subloops to the new loop. 
1413 for (Loop *SubLoop : *Original) 1414 createClonedLoopStructure(SubLoop, &New, VM); 1415 1416 return &New; 1417 } 1418 1419 bool LoopConstrainer::run() { 1420 BasicBlock *Preheader = nullptr; 1421 LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch); 1422 Preheader = OriginalLoop.getLoopPreheader(); 1423 assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr && 1424 "preconditions!"); 1425 1426 OriginalPreheader = Preheader; 1427 MainLoopPreheader = Preheader; 1428 1429 bool IsSignedPredicate = MainLoopStructure.IsSignedPredicate; 1430 Optional<SubRanges> MaybeSR = calculateSubRanges(IsSignedPredicate); 1431 if (!MaybeSR.hasValue()) { 1432 DEBUG(dbgs() << "irce: could not compute subranges\n"); 1433 return false; 1434 } 1435 1436 SubRanges SR = MaybeSR.getValue(); 1437 bool Increasing = MainLoopStructure.IndVarIncreasing; 1438 IntegerType *IVTy = 1439 cast<IntegerType>(MainLoopStructure.IndVarBase->getType()); 1440 1441 SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce"); 1442 Instruction *InsertPt = OriginalPreheader->getTerminator(); 1443 1444 // It would have been better to make `PreLoop' and `PostLoop' 1445 // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy 1446 // constructor. 1447 ClonedLoop PreLoop, PostLoop; 1448 bool NeedsPreLoop = 1449 Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue(); 1450 bool NeedsPostLoop = 1451 Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue(); 1452 1453 Value *ExitPreLoopAt = nullptr; 1454 Value *ExitMainLoopAt = nullptr; 1455 const SCEVConstant *MinusOneS = 1456 cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */)); 1457 1458 if (NeedsPreLoop) { 1459 const SCEV *ExitPreLoopAtSCEV = nullptr; 1460 1461 if (Increasing) 1462 ExitPreLoopAtSCEV = *SR.LowLimit; 1463 else { 1464 if (CanBeMin(SE, *SR.HighLimit, IsSignedPredicate)) { 1465 DEBUG(dbgs() << "irce: could not prove no-overflow when computing " 1466 << "preloop exit limit. HighLimit = " << *(*SR.HighLimit) 1467 << "\n"); 1468 return false; 1469 } 1470 ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS); 1471 } 1472 1473 if (!isSafeToExpandAt(ExitPreLoopAtSCEV, InsertPt, SE)) { 1474 DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" 1475 << " preloop exit limit " << *ExitPreLoopAtSCEV 1476 << " at block " << InsertPt->getParent()->getName() << "\n"); 1477 return false; 1478 } 1479 1480 ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt); 1481 ExitPreLoopAt->setName("exit.preloop.at"); 1482 } 1483 1484 if (NeedsPostLoop) { 1485 const SCEV *ExitMainLoopAtSCEV = nullptr; 1486 1487 if (Increasing) 1488 ExitMainLoopAtSCEV = *SR.HighLimit; 1489 else { 1490 if (CanBeMin(SE, *SR.LowLimit, IsSignedPredicate)) { 1491 DEBUG(dbgs() << "irce: could not prove no-overflow when computing " 1492 << "mainloop exit limit. 
LowLimit = " << *(*SR.LowLimit) 1493 << "\n"); 1494 return false; 1495 } 1496 ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS); 1497 } 1498 1499 if (!isSafeToExpandAt(ExitMainLoopAtSCEV, InsertPt, SE)) { 1500 DEBUG(dbgs() << "irce: could not prove that it is safe to expand the" 1501 << " main loop exit limit " << *ExitMainLoopAtSCEV 1502 << " at block " << InsertPt->getParent()->getName() << "\n"); 1503 return false; 1504 } 1505 1506 ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt); 1507 ExitMainLoopAt->setName("exit.mainloop.at"); 1508 } 1509 1510 // We clone these ahead of time so that we don't have to deal with changing 1511 // and temporarily invalid IR as we transform the loops. 1512 if (NeedsPreLoop) 1513 cloneLoop(PreLoop, "preloop"); 1514 if (NeedsPostLoop) 1515 cloneLoop(PostLoop, "postloop"); 1516 1517 RewrittenRangeInfo PreLoopRRI; 1518 1519 if (NeedsPreLoop) { 1520 Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header, 1521 PreLoop.Structure.Header); 1522 1523 MainLoopPreheader = 1524 createPreheader(MainLoopStructure, Preheader, "mainloop"); 1525 PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader, 1526 ExitPreLoopAt, MainLoopPreheader); 1527 rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader, 1528 PreLoopRRI); 1529 } 1530 1531 BasicBlock *PostLoopPreheader = nullptr; 1532 RewrittenRangeInfo PostLoopRRI; 1533 1534 if (NeedsPostLoop) { 1535 PostLoopPreheader = 1536 createPreheader(PostLoop.Structure, Preheader, "postloop"); 1537 PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader, 1538 ExitMainLoopAt, PostLoopPreheader); 1539 rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader, 1540 PostLoopRRI); 1541 } 1542 1543 BasicBlock *NewMainLoopPreheader = 1544 MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr; 1545 BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit, 1546 PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit, 1547 PostLoopRRI.ExitSelector, NewMainLoopPreheader}; 1548 1549 // Some of the above may be nullptr, filter them out before passing to 1550 // addToParentLoopIfNeeded. 1551 auto NewBlocksEnd = 1552 std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr); 1553 1554 addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd)); 1555 1556 DT.recalculate(F); 1557 1558 // We need to first add all the pre and post loop blocks into the loop 1559 // structures (as part of createClonedLoopStructure), and then update the 1560 // LCSSA form and LoopSimplifyForm. This is necessary for correctly updating 1561 // LI when LoopSimplifyForm is generated. 1562 Loop *PreL = nullptr, *PostL = nullptr; 1563 if (!PreLoop.Blocks.empty()) { 1564 PreL = createClonedLoopStructure( 1565 &OriginalLoop, OriginalLoop.getParentLoop(), PreLoop.Map); 1566 } 1567 1568 if (!PostLoop.Blocks.empty()) { 1569 PostL = createClonedLoopStructure( 1570 &OriginalLoop, OriginalLoop.getParentLoop(), PostLoop.Map); 1571 } 1572 1573 // This function canonicalizes the loop into Loop-Simplify and LCSSA forms. 1574 auto CanonicalizeLoop = [&] (Loop *L, bool IsOriginalLoop) { 1575 formLCSSARecursively(*L, DT, &LI, &SE); 1576 simplifyLoop(L, &DT, &LI, &SE, nullptr, true); 1577 // Pre/post loops are slow paths, we do not need to perform any loop 1578 // optimizations on them. 
  // This function canonicalizes the loop into Loop-Simplify and LCSSA forms.
  auto CanonicalizeLoop = [&] (Loop *L, bool IsOriginalLoop) {
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
    // Pre/post loops are slow paths, we do not need to perform any loop
    // optimizations on them.
    if (!IsOriginalLoop)
      DisableAllLoopOptsOnLoop(*L);
  };
  if (PreL)
    CanonicalizeLoop(PreL, false);
  if (PostL)
    CanonicalizeLoop(PostL, false);
  CanonicalizeLoop(&OriginalLoop, true);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar,
    bool IsLatchSigned) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getBegin() and "D" is getStep()).  We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - N * A".
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // Here L stands for the upper limit of the safe iteration space.
  // The inequality is satisfied by (0 - M) <= IndVar < (L - M). To avoid
  // overflows when calculating (0 - M) and (L - M) we, depending on the type
  // of the IV's iteration space, clamp the calculations to the borders of that
  // space. For example, if IndVar is unsigned, (0 - M) overflows for any
  // M > 0. If we figured out that "anything greater than (-M) is safe", we
  // strengthen this to "everything greater than 0 is safe", assuming that
  // values between -M and 0 just do not exist in the unsigned iteration space,
  // and we don't want to deal with overflown values.
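  //
  // A worked example (a sketch that ignores the clamping described below):
  // with IndVar = {5,+,1} (A = 5, B = 1) and a range check on {2,+,1} against
  // a length L (C = 2, D = 1, getEnd() = L), we get M = C - A = -3, so the
  // check passes exactly when 0 <= IndVar - 3 < L, i.e. for IndVar in
  // [3, L + 3).  ClampedSubtract below computes precisely these two bounds
  // while staying inside the IV's iteration space.
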
  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;
  assert(!B->isZero() && "Recurrence with zero step?");

  const SCEV *C = getBegin();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getStep());
  if (D != B)
    return None;

  assert(!D->getValue()->isZero() && "Recurrence with zero step?");
  unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
  const SCEV *SIntMax = SE.getConstant(APInt::getSignedMaxValue(BitWidth));

  // Subtract Y from X so that the result does not cross the border of the IV
  // iteration space. Mathematically, it is equivalent to:
  //
  //    ClampedSubtract(X, Y) = min(max(X - Y, INT_MIN), INT_MAX).  [1]
  //
  // In [1], 'X - Y' is a mathematical subtraction (result is not bounded to
  // any width of bit grid). But after we take min/max, the result is
  // guaranteed to be within [INT_MIN, INT_MAX].
  //
  // In [1], INT_MAX and INT_MIN stand for the signed or unsigned max/min
  // values of the IV's type, depending on the type of latch condition that
  // defines the IV iteration space.
  auto ClampedSubtract = [&](const SCEV *X, const SCEV *Y) {
    if (IsLatchSigned) {
      // X is a number from signed range, Y is interpreted as signed.
      // Even if Y is SINT_MAX, (X - Y) does not reach SINT_MIN. So the only
      // thing we should care about is that we didn't cross SINT_MAX.
      // So, if Y is positive, we subtract Y safely.
      //   Rule 1: Y > 0 ---> Y.
      // If 0 <= -Y <= (SINT_MAX - X), we subtract Y safely.
      //   Rule 2: Y >=s (X - SINT_MAX) ---> Y.
      // If 0 <= (SINT_MAX - X) < -Y, we can only subtract (X - SINT_MAX).
      //   Rule 3: Y <s (X - SINT_MAX) ---> (X - SINT_MAX).
      // It gives us smax(Y, X - SINT_MAX) to subtract in all cases.
      const SCEV *XMinusSIntMax = SE.getMinusSCEV(X, SIntMax);
      return SE.getMinusSCEV(X, SE.getSMaxExpr(Y, XMinusSIntMax),
                             SCEV::FlagNSW);
    } else
      // X is a number from unsigned range, Y is interpreted as signed.
      // Even if Y is SINT_MIN, (X - Y) does not reach UINT_MAX. So the only
      // thing we should care about is that we didn't cross zero.
      // So, if Y is negative, we subtract Y safely.
      //   Rule 1: Y <s 0 ---> Y.
      // If 0 <= Y <= X, we subtract Y safely.
      //   Rule 2: Y <=s X ---> Y.
      // If 0 <= X < Y, we should stop at 0 and can only subtract X.
      //   Rule 3: Y >s X ---> X.
      // It gives us smin(X, Y) to subtract in all cases.
      return SE.getMinusSCEV(X, SE.getSMinExpr(X, Y), SCEV::FlagNUW);
  };
  const SCEV *M = SE.getMinusSCEV(C, A);
  const SCEV *Zero = SE.getZero(M->getType());
  const SCEV *Begin = ClampedSubtract(Zero, M);
  const SCEV *End = ClampedSubtract(getEnd(), M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectSignedRange(ScalarEvolution &SE,
                     const Optional<InductiveRangeCheck::Range> &R1,
                     const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ true))
    return None;
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection. Thus, R1 is never empty.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ true) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, just return None.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ true))
    return None;
  return Ret;
}

static Optional<InductiveRangeCheck::Range>
IntersectUnsignedRange(ScalarEvolution &SE,
                       const Optional<InductiveRangeCheck::Range> &R1,
                       const InductiveRangeCheck::Range &R2) {
  if (R2.isEmpty(SE, /* IsSigned */ false))
    return None;
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();
  // We never return empty ranges from this function, and R1 is supposed to be
  // a result of intersection. Thus, R1 is never empty.
  assert(!R1Value.isEmpty(SE, /* IsSigned */ false) &&
         "We should never have empty R1!");

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getUMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getUMinExpr(R1Value.getEnd(), R2.getEnd());

  // If the resulting range is empty, just return None.
  auto Ret = InductiveRangeCheck::Range(NewBegin, NewEnd);
  if (Ret.isEmpty(SE, /* IsSigned */ false))
    return None;
  return Ret;
}
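
// For example, intersecting an already accumulated signed range [0, 100) with
// a new check's safe range [10, smin(L, 200)) yields
// [smax(0, 10), smin(100, smin(L, 200))), i.e. [10, smin(100, L)); the result
// is only used if it cannot be proven empty.
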
bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  const SCEVAddRecExpr *IndVar = cast<SCEVAddRecExpr>(
      SE.getMinusSCEV(SE.getSCEV(LS.IndVarBase), SE.getSCEV(LS.IndVarStep)));

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;
  // Depending on the type of the latch predicate, we interpret the IV
  // iteration range as a signed or an unsigned range. We use different min/max
  // functions (signed or unsigned) when intersecting this range with the safe
  // iteration ranges implied by the range checks.
  auto IntersectRange =
      LS.IsSignedPredicate ? IntersectSignedRange : IntersectUnsignedRange;

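  // Note that the accumulation below is greedy: a range check is queued for
  // elimination only if intersecting its safe range with what has been
  // accumulated so far leaves a provably non-empty range; otherwise the check
  // is skipped and the accumulated range is kept unchanged.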
  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar,
                                                LS.IsSignedPredicate);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        assert(
            !MaybeSafeIterRange.getValue().isEmpty(SE, LS.IsSignedPredicate) &&
            "We should never return empty ranges!");
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LPM,
                     LS, SE, DT, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}