//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
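//
// For example, the canonical induction variable of the loop
//
//   for (i = 0; i != n; i += 4) { ... }
//
// is the polynomial recurrence {0,+,4}<%loop>, read as "starts at 0 and steps
// by 4 on each iteration of %loop".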
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
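
// The next three predicates recognize the constant-expression idioms that
// front ends use to encode sizeof, alignof and offsetof before a type's
// layout is known.  For example (illustrative IR), the constant
//
//   ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
//
// is the byte offset of element 1 from a null base pointer, i.e. sizeof(i32).
// alignof is encoded the same way through a gep into a {i1, T} struct, and
// offsetof through a gep of a null pointer with a field index.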
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively.  A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LA->getNoWrapFlags() != RA->getNoWrapFlags())
      return (int)LA->getNoWrapFlags() - (int)RA->getNoWrapFlags();

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LC->getNoWrapFlags() != RC->getNoWrapFlags())
      return (int)LC->getNoWrapFlags() - (int)RC->getNoWrapFlags();

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because
  // we do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: when dividing N by 1, the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }
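
  // For example (illustrative), dividing (4*x + 8) by 4 recurses into the add
  // expression: 4*x divides to quotient x, and 8 divides to quotient 2, giving
  // Quotient = (x + 2) and Remainder = 0.  Dividing x itself by 4, in
  // contrast, takes the "cannot divide" path below: Quotient = 0 and
  // Remainder = x.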

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1).  However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
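  //
  // As a concrete example, take K = 3 and W = 32.  K! = 6 contains T = 1
  // factor of 2, so K!/2^T = 3, which is odd.  The product It*(It-1)*(It-2)
  // is computed at W+T = 33 bits, shifted right by T = 1 bit, truncated back
  // to 32 bits, and finally multiplied by the multiplicative inverse of 3
  // modulo 2^32 to perform the exact division by 3.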

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
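///
/// For example, the affine recurrence {5,+,3} evaluates to
/// 5*BC(It, 0) + 3*BC(It, 1) = 5 + 3*It at iteration number It.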
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during recursion and other modifications the ID was
    // inserted into the cache.  So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
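//
// For example, for a Step known to be positive with maximum value 1 at bit
// width 8, this returns APInt::getSignedMinValue(8) - 1, which wraps to 127,
// together with the predicate SLT: as long as the recurrence stays
// signed-less-than 127, adding the step cannot overflow.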
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it.  Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling.
// This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
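//
// For instance (illustrative), if AR is {x + 1,+,1} and the pre-increment
// recurrence {x,+,1} is provably non-wrapping, the extended start is built as
// ext(1) + ext(x) rather than ext(x + 1).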
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs,
// and the (C + x + y + ...) expression is \p WholeAddExpr.
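//
// For example, with C = 0xA5 at bit width 8 and a remainder (x + y + ...)
// known to have at least 4 trailing zero bits, D is the low four bits of C,
// i.e. 0x05.  Then (C - D + x + y + ...) = (0xA0 + x + y + ...) still has 4
// trailing zeros, and adding D < 2^4 back to it produces no carries at all,
// so the top-level addition cannot wrap.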
// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
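    // For example, if x is an i32 known to be in [0, 200), then for
    // (zext i8 (trunc i32 x) to i64) the truncate discards only zero bits,
    // so the range check below succeeds and we return (zext i32 x to i64).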
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
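            // (The step is sign-extended below, since a known-negative step
            // is only meaningful under a signed interpretation.)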
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Often address arithmetic contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
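    // Worked example: in (zext (5 + (4 * X))), the (4 * X) term has two
    // trailing zero bits, so D = 5 mod 4 = 1 and the expression becomes
    // (zext 1) + (zext (4 + (4 * X))); the outer add cannot wrap because the
    // residual is a multiple of 4 while D < 4.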
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    //  Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
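/// In effect, whichever extension folds away is used: negative constants are
/// sign-extended, and when neither cast folds, the zero-extended form is
/// preferred unless the expression is obviously signed (e.g. an smax).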
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29))))  +  r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
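    // (A lone, unscaled, nonzero constant is already at the outer level, so
    // it is only "interesting" when scaled, when another constant has already
    // been accumulated, or when it is zero and can simply be dropped.)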
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(), Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior.  Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const SmallVectorImpl<const SCEV *> &Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
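  // (For non-negative values the signed and unsigned interpretations
  // coincide, so an <nsw> operation over non-negative operands cannot wrap
  // unsigned either.)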
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit recursion depth.
  if (Depth > MaxArithDepth)
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once.  If so, merge them together into a multiply expression.  Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded, e.g., n*trunc(x) + m*trunc(y) --> trunc(n*x + m*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
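  // For instance, X + (X * Y) becomes X * (Y + 1).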
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage.  Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we take the next term of the numerator and divide by
  // the next term of the denominator.  This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
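/// For example, (2 + x) * y contains a constant in its add/mul chain, while
/// (x /u 2) * y does not: the walk only descends through add and mul nodes,
/// so the 2 inside the udiv is never visited.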
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit recursion depth.
  if (Depth > MaxArithDepth)
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    if (Ops.size() == 2)
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
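      // For instance, (-1 * (a + b)) becomes ((-1 * a) + (-1 * b)) when at
      // least one of the partial products folds to something simpler.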
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
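      // For example (illustrative): {1,+,2}<L> * {3,+,4}<L> folds to the
      // quadratic recurrence {3,+,18,+,16}<L>, since (1+2n)(3+4n) = 3+10n+8n^2
      // and the chrec coefficients use the binomial basis.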
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize)
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        const SCEV *Term = getZero(Ty);
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
                                               SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
          }
        }
        AddRecOps.push_back(Term);
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one; otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general lowering:
  // %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
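/// For instance (illustrative): X udiv 1 folds to X, and {0,+,4}<L> udiv 2
/// folds to {0,+,2}<L> when the zero-extended forms agree.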
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of LHS.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
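      // For example (illustrative): (%x /u 4) /u 2 folds to %x /u 8. If B*C
      // wraps the bit width, the combined divisor exceeds any possible value
      // of A, so the overall result is known to be 0.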
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
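    // For example (illustrative): (6 * %x)<nuw> /u 3 has gcd(6, 3) == 3, so
    // it is rewritten as (2 * %x) /u 1, which then folds to 2 * %x.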
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and use that
  // information to infer NUW and NSW flags. However, computing a BE count
  // requires calling getAddRecExpr, so we may not yet have a meaningful BE
  // count at this point (and if we don't, we'd be stuck with a
  // SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
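  // For example (illustrative): {{X,+,Y}<Inner>,+,Z}<Outer>, with Inner nested
  // inside Outer, becomes {{X,+,Z}<Outer>,+,Y}<Inner>, so the deeper loop's
  // recurrence ends up outermost in the expression tree.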
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one; otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant; the first thing we do with CurTy is
  // to get its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X smax Y smax Y  -->  X smax Y
    // X smax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one; otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
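    // (For umax, minimum-int is 0, and umax(0, %x) folds to %x.)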
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have a umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X umax Y umax Y  -->  X umax Y
    // X umax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning(
                                    ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) {
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i; --e;
    } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i],
                                               Ops[i + 1])) {
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one; otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  // ~smax(~x, ~y, ~z) == smin(x, y, z).
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getSMaxExpr(NotOps));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // ~umax(~x, ~y, ~z) == umin(x, y, z).
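  // For example (illustrative, in i8): umin(1, 250) = 1, and
  // ~umax(~1, ~250) = ~umax(254, 5) = ~254 = 1, so the identity holds.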
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getUMaxExpr(NotOps));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
         T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether a value has nuw/nsw/exact set but its SCEV does not.
/// TODO: In reality it is better to check poison recursively,
/// but this is better than nothing.
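/// For instance (illustrative): if %v = add nuw i32 %a, %b is mapped to the
/// plain SCEV (%a + %b), the nuw fact has been dropped, so caching the value
/// against that SCEV could claim more than the expression guarantees.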
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists; otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V.
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V.
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its loop is L. If the loop is not L, use the AddRec itself
/// when IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its loop is L. If the loop is not L,
/// use the AddRec itself.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
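/// For example (illustrative): rewriting {%a,+,%b}<L> for loop L yields its
/// post-increment form {%a + %b,+,%b}<L>.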
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only rewrite AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match, we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Backedge condition of the loop latch.
  Value *BackedgeCond = nullptr;
  /// Set to true if the backedge is taken on the positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition of the loop latch, return a
  // constant evolution node based on the branch taken to the loop header.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
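        // For example (illustrative): an extractvalue of index 0 from
        // sadd.with.overflow(%a, %b), with every use of the arithmetic result
        // guarded by the overflow bit, maps to
        // BinaryOp(Add, %a, %b, /*IsNSW=*/true, /*IsNUW=*/false) below.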
        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // The same reasoning as sadd/uadd above.
        if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
                        CI->getArgOperand(1));
      default:
        break;
      }
    break;
  }

  default:
    break;
  }

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
///   Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
///   Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecFromPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ?
          dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4690 // 4691 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4692 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4693 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4694 SmallVector<const SCEVPredicate *, 3> Predicates; 4695 4696 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4697 // return an AddRec expression under some predicate. 4698 4699 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4700 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4701 assert(L && "Expecting an integer loop header phi"); 4702 4703 // The loop may have multiple entrances or multiple exits; we can analyze 4704 // this phi as an addrec if it has a unique entry value and a unique 4705 // backedge value. 4706 Value *BEValueV = nullptr, *StartValueV = nullptr; 4707 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4708 Value *V = PN->getIncomingValue(i); 4709 if (L->contains(PN->getIncomingBlock(i))) { 4710 if (!BEValueV) { 4711 BEValueV = V; 4712 } else if (BEValueV != V) { 4713 BEValueV = nullptr; 4714 break; 4715 } 4716 } else if (!StartValueV) { 4717 StartValueV = V; 4718 } else if (StartValueV != V) { 4719 StartValueV = nullptr; 4720 break; 4721 } 4722 } 4723 if (!BEValueV || !StartValueV) 4724 return None; 4725 4726 const SCEV *BEValue = getSCEV(BEValueV); 4727 4728 // If the value coming around the backedge is an add with the symbolic 4729 // value we just inserted, possibly with casts that we can ignore under 4730 // an appropriate runtime guard, then we found a simple induction variable! 4731 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4732 if (!Add) 4733 return None; 4734 4735 // If there is a single occurrence of the symbolic value, possibly 4736 // casted, replace it with a recurrence. 4737 unsigned FoundIndex = Add->getNumOperands(); 4738 Type *TruncTy = nullptr; 4739 bool Signed; 4740 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4741 if ((TruncTy = 4742 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4743 if (FoundIndex == e) { 4744 FoundIndex = i; 4745 break; 4746 } 4747 4748 if (FoundIndex == Add->getNumOperands()) 4749 return None; 4750 4751 // Create an add with everything but the specified operand. 4752 SmallVector<const SCEV *, 8> Ops; 4753 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4754 if (i != FoundIndex) 4755 Ops.push_back(Add->getOperand(i)); 4756 const SCEV *Accum = getAddExpr(Ops); 4757 4758 // The runtime checks will not be valid if the step amount is 4759 // varying inside the loop. 4760 if (!isLoopInvariant(Accum, L)) 4761 return None; 4762 4763 // *** Part2: Create the predicates 4764 4765 // Analysis was successful: we have a phi-with-cast pattern for which we 4766 // can return an AddRec expression under the following predicates: 4767 // 4768 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4769 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4770 // P2: An Equal predicate that guarantees that 4771 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4772 // P3: An Equal predicate that guarantees that 4773 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4774 // 4775 // As we next prove, the above predicates guarantee that: 4776 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4777 // 4778 // 4779 // More formally, we want to prove that: 4780 // Expr(i+1) = Start + (i+1) * Accum 4781 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4782 // 4783 // Given that: 4784 // 1) Expr(0) = Start 4785 // 2) Expr(1) = Start + Accum 4786 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4787 // 3) Induction hypothesis (step i): 4788 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4789 // 4790 // Proof: 4791 // Expr(i+1) = 4792 // = Start + (i+1)*Accum 4793 // = (Start + i*Accum) + Accum 4794 // = Expr(i) + Accum 4795 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4796 // :: from step i 4797 // 4798 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4799 // 4800 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4801 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4802 // + Accum :: from P3 4803 // 4804 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4805 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4806 // 4807 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4808 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4809 // 4810 // By induction, the same applies to all iterations 1<=i<n: 4811 // 4812 4813 // Create a truncated addrec for which we will add a no overflow check (P1). 4814 const SCEV *StartVal = getSCEV(StartValueV); 4815 const SCEV *PHISCEV = 4816 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4817 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4818 4819 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4820 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4821 // will be constant. 4822 // 4823 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4824 // add P1. 4825 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4826 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4827 Signed ? SCEVWrapPredicate::IncrementNSSW 4828 : SCEVWrapPredicate::IncrementNUSW; 4829 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4830 Predicates.push_back(AddRecPred); 4831 } 4832 4833 // Create the Equal Predicates P2,P3: 4834 4835 // It is possible that the predicates P2 and/or P3 are computable at 4836 // compile time due to StartVal and/or Accum being constants. 4837 // If either one is, then we can check that now and escape if either P2 4838 // or P3 is false. 4839 4840 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4841 // for each of StartVal and Accum 4842 auto getExtendedExpr = [&](const SCEV *Expr, 4843 bool CreateSignExtend) -> const SCEV * { 4844 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4845 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4846 const SCEV *ExtendedExpr = 4847 CreateSignExtend ? 
             getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
4911 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4912 if (I != PredicatedSCEVRewrites.end()) { 4913 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4914 I->second; 4915 // Analysis was done before and failed to create an AddRec: 4916 if (Rewrite.first == SymbolicPHI) 4917 return None; 4918 // Analysis was done before and succeeded to create an AddRec under 4919 // a predicate: 4920 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4921 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4922 return Rewrite; 4923 } 4924 4925 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4926 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4927 4928 // Record in the cache that the analysis failed 4929 if (!Rewrite) { 4930 SmallVector<const SCEVPredicate *, 3> Predicates; 4931 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4932 return None; 4933 } 4934 4935 return Rewrite; 4936 } 4937 4938 // FIXME: This utility is currently required because the Rewriter currently 4939 // does not rewrite this expression: 4940 // {0, +, (sext ix (trunc iy to ix) to iy)} 4941 // into {0, +, %step}, 4942 // even when the following Equal predicate exists: 4943 // "%step == (sext ix (trunc iy to ix) to iy)". 4944 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 4945 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 4946 if (AR1 == AR2) 4947 return true; 4948 4949 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 4950 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 4951 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 4952 return false; 4953 return true; 4954 }; 4955 4956 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 4957 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 4958 return false; 4959 return true; 4960 } 4961 4962 /// A helper function for createAddRecFromPHI to handle simple cases. 4963 /// 4964 /// This function tries to find an AddRec expression for the simplest (yet most 4965 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4966 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4967 /// technique for finding the AddRec expression. 4968 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4969 Value *BEValueV, 4970 Value *StartValueV) { 4971 const Loop *L = LI.getLoopFor(PN->getParent()); 4972 assert(L && L->getHeader() == PN->getParent()); 4973 assert(BEValueV && StartValueV); 4974 4975 auto BO = MatchBinaryOp(BEValueV, DT); 4976 if (!BO) 4977 return nullptr; 4978 4979 if (BO->Opcode != Instruction::Add) 4980 return nullptr; 4981 4982 const SCEV *Accum = nullptr; 4983 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4984 Accum = getSCEV(BO->RHS); 4985 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4986 Accum = getSCEV(BO->LHS); 4987 4988 if (!Accum) 4989 return nullptr; 4990 4991 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4992 if (BO->IsNUW) 4993 Flags = setFlags(Flags, SCEV::FlagNUW); 4994 if (BO->IsNSW) 4995 Flags = setFlags(Flags, SCEV::FlagNSW); 4996 4997 const SCEV *StartVal = getSCEV(StartValueV); 4998 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4999 5000 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5001 5002 // We can add Flags to the post-inc expression only if we 5003 // know that it is *undefined behavior* for BEValueV to 5004 // overflow. 
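  // (Illustrative: for PHISCEV = {S,+,A}<L>, the post-inc expression is
  // {S+A,+,A}<L>, i.e. the value of the recurrence *after* each increment.
  // Creating it eagerly here lets the flags be recorded on that uniqued
  // SCEV, so later queries see them.)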
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
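      // (Illustrative: this admits, e.g., Accum = {1,+,1}<L>, an increment
      // that itself grows by one each iteration; conceptually PN then
      // evolves as the quadratic chain of recurrences {Start,+,1,+,1}<L>.)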
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We can not keep this temporary
  // as it will prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are in the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
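// For example, with
//   br i1 %c, label %left, label %right
// where both %left and %right branch to %merge, and
//   Merge = phi [ %x, %left ], [ %y, %right ]
// this returns C = %c, LHS = %x, RHS = %y. The second dominance check below
// also handles a PHI that lists its incoming values in the opposite order,
// in which case LHS and RHS are swapped.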
5245 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5246 Value *&C, Value *&LHS, Value *&RHS) { 5247 C = BI->getCondition(); 5248 5249 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5250 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5251 5252 if (!LeftEdge.isSingleEdge()) 5253 return false; 5254 5255 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5256 5257 Use &LeftUse = Merge->getOperandUse(0); 5258 Use &RightUse = Merge->getOperandUse(1); 5259 5260 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5261 LHS = LeftUse; 5262 RHS = RightUse; 5263 return true; 5264 } 5265 5266 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5267 LHS = RightUse; 5268 RHS = LeftUse; 5269 return true; 5270 } 5271 5272 return false; 5273 } 5274 5275 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5276 auto IsReachable = 5277 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5278 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5279 const Loop *L = LI.getLoopFor(PN->getParent()); 5280 5281 // We don't want to break LCSSA, even in a SCEV expression tree. 5282 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5283 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5284 return nullptr; 5285 5286 // Try to match 5287 // 5288 // br %cond, label %left, label %right 5289 // left: 5290 // br label %merge 5291 // right: 5292 // br label %merge 5293 // merge: 5294 // V = phi [ %x, %left ], [ %y, %right ] 5295 // 5296 // as "select %cond, %x, %y" 5297 5298 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5299 assert(IDom && "At least the entry block should dominate PN"); 5300 5301 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5302 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5303 5304 if (BI && BI->isConditional() && 5305 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5306 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5307 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5308 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5309 } 5310 5311 return nullptr; 5312 } 5313 5314 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5315 if (const SCEV *S = createAddRecFromPHI(PN)) 5316 return S; 5317 5318 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5319 return S; 5320 5321 // If the PHI has a single incoming value, follow that value, unless the 5322 // PHI's incoming blocks are in a different loop, in which case doing so 5323 // risks breaking LCSSA form. Instcombine would normally zap these, but 5324 // it doesn't have DominatorTree information, so it may miss cases. 5325 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5326 if (LI.replacementPreservesLCSSAForm(PN, V)) 5327 return getSCEV(V); 5328 5329 // If it's not a loop phi, we can't handle it yet. 5330 return getUnknown(PN); 5331 } 5332 5333 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5334 Value *Cond, 5335 Value *TrueVal, 5336 Value *FalseVal) { 5337 // Handle "constant" branch or select. This can occur for instance when a 5338 // loop pass transforms an inner loop and moves on to process the outer loop. 5339 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5340 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5341 5342 // Try to match some simple smax or umax patterns. 
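  // For example, "%a >s %b ? %a : %b" is smax(%a, %b). The LDiff == RDiff
  // checks below generalize this to arms that share a common additive
  // offset, e.g. "%a >s %b ? %a+4 : %b+4" becomes smax(%a, %b)+4.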
5343 auto *ICI = dyn_cast<ICmpInst>(Cond); 5344 if (!ICI) 5345 return getUnknown(I); 5346 5347 Value *LHS = ICI->getOperand(0); 5348 Value *RHS = ICI->getOperand(1); 5349 5350 switch (ICI->getPredicate()) { 5351 case ICmpInst::ICMP_SLT: 5352 case ICmpInst::ICMP_SLE: 5353 std::swap(LHS, RHS); 5354 LLVM_FALLTHROUGH; 5355 case ICmpInst::ICMP_SGT: 5356 case ICmpInst::ICMP_SGE: 5357 // a >s b ? a+x : b+x -> smax(a, b)+x 5358 // a >s b ? b+x : a+x -> smin(a, b)+x 5359 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5360 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5361 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5362 const SCEV *LA = getSCEV(TrueVal); 5363 const SCEV *RA = getSCEV(FalseVal); 5364 const SCEV *LDiff = getMinusSCEV(LA, LS); 5365 const SCEV *RDiff = getMinusSCEV(RA, RS); 5366 if (LDiff == RDiff) 5367 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5368 LDiff = getMinusSCEV(LA, RS); 5369 RDiff = getMinusSCEV(RA, LS); 5370 if (LDiff == RDiff) 5371 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5372 } 5373 break; 5374 case ICmpInst::ICMP_ULT: 5375 case ICmpInst::ICMP_ULE: 5376 std::swap(LHS, RHS); 5377 LLVM_FALLTHROUGH; 5378 case ICmpInst::ICMP_UGT: 5379 case ICmpInst::ICMP_UGE: 5380 // a >u b ? a+x : b+x -> umax(a, b)+x 5381 // a >u b ? b+x : a+x -> umin(a, b)+x 5382 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5383 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5384 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5385 const SCEV *LA = getSCEV(TrueVal); 5386 const SCEV *RA = getSCEV(FalseVal); 5387 const SCEV *LDiff = getMinusSCEV(LA, LS); 5388 const SCEV *RDiff = getMinusSCEV(RA, RS); 5389 if (LDiff == RDiff) 5390 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5391 LDiff = getMinusSCEV(LA, RS); 5392 RDiff = getMinusSCEV(RA, LS); 5393 if (LDiff == RDiff) 5394 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5395 } 5396 break; 5397 case ICmpInst::ICMP_NE: 5398 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5399 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5400 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5401 const SCEV *One = getOne(I->getType()); 5402 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5403 const SCEV *LA = getSCEV(TrueVal); 5404 const SCEV *RA = getSCEV(FalseVal); 5405 const SCEV *LDiff = getMinusSCEV(LA, LS); 5406 const SCEV *RDiff = getMinusSCEV(RA, One); 5407 if (LDiff == RDiff) 5408 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5409 } 5410 break; 5411 case ICmpInst::ICMP_EQ: 5412 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5413 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5414 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5415 const SCEV *One = getOne(I->getType()); 5416 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5417 const SCEV *LA = getSCEV(TrueVal); 5418 const SCEV *RA = getSCEV(FalseVal); 5419 const SCEV *LDiff = getMinusSCEV(LA, One); 5420 const SCEV *RDiff = getMinusSCEV(RA, LS); 5421 if (LDiff == RDiff) 5422 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5423 } 5424 break; 5425 default: 5426 break; 5427 } 5428 5429 return getUnknown(I); 5430 } 5431 5432 /// Expand GEP instructions into add and multiply operations. This allows them 5433 /// to be analyzed by regular SCEV code. 5434 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5435 // Don't attempt to analyze GEPs over unsized objects. 
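  // (Element sizes come from the DataLayout; illustratively, a GEP of i32 at
  // index %i is modelled by getGEPExpr below as base + 4 * %i.)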
5436 if (!GEP->getSourceElementType()->isSized()) 5437 return getUnknown(GEP); 5438 5439 SmallVector<const SCEV *, 4> IndexExprs; 5440 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5441 IndexExprs.push_back(getSCEV(*Index)); 5442 return getGEPExpr(GEP, IndexExprs); 5443 } 5444 5445 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5446 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5447 return C->getAPInt().countTrailingZeros(); 5448 5449 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5450 return std::min(GetMinTrailingZeros(T->getOperand()), 5451 (uint32_t)getTypeSizeInBits(T->getType())); 5452 5453 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5454 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5455 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5456 ? getTypeSizeInBits(E->getType()) 5457 : OpRes; 5458 } 5459 5460 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5461 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5462 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5463 ? getTypeSizeInBits(E->getType()) 5464 : OpRes; 5465 } 5466 5467 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5468 // The result is the min of all operands results. 5469 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5470 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5471 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5472 return MinOpRes; 5473 } 5474 5475 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5476 // The result is the sum of all operands results. 5477 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5478 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5479 for (unsigned i = 1, e = M->getNumOperands(); 5480 SumOpRes != BitWidth && i != e; ++i) 5481 SumOpRes = 5482 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5483 return SumOpRes; 5484 } 5485 5486 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5487 // The result is the min of all operands results. 5488 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5489 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5490 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5491 return MinOpRes; 5492 } 5493 5494 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5495 // The result is the min of all operands results. 5496 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5497 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5498 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5499 return MinOpRes; 5500 } 5501 5502 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5503 // The result is the min of all operands results. 5504 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5505 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5506 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5507 return MinOpRes; 5508 } 5509 5510 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5511 // For a SCEVUnknown, ask ValueTracking. 
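    // (For example, a value that ValueTracking can prove is a multiple of 8
    // has at least three known trailing zero bits.)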
5512 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5513 return Known.countMinTrailingZeros(); 5514 } 5515 5516 // SCEVUDivExpr 5517 return 0; 5518 } 5519 5520 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5521 auto I = MinTrailingZerosCache.find(S); 5522 if (I != MinTrailingZerosCache.end()) 5523 return I->second; 5524 5525 uint32_t Result = GetMinTrailingZerosImpl(S); 5526 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5527 assert(InsertPair.second && "Should insert a new key"); 5528 return InsertPair.first->second; 5529 } 5530 5531 /// Helper method to assign a range to V from metadata present in the IR. 5532 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5533 if (Instruction *I = dyn_cast<Instruction>(V)) 5534 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5535 return getConstantRangeFromMetadata(*MD); 5536 5537 return None; 5538 } 5539 5540 /// Determine the range for a particular SCEV. If SignHint is 5541 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5542 /// with a "cleaner" unsigned (resp. signed) representation. 5543 const ConstantRange & 5544 ScalarEvolution::getRangeRef(const SCEV *S, 5545 ScalarEvolution::RangeSignHint SignHint) { 5546 DenseMap<const SCEV *, ConstantRange> &Cache = 5547 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5548 : SignedRanges; 5549 5550 // See if we've computed this range already. 5551 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5552 if (I != Cache.end()) 5553 return I->second; 5554 5555 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5556 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5557 5558 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5559 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5560 5561 // If the value has known zeros, the maximum value will have those known zeros 5562 // as well. 
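  // (Illustrative, for i32 with TZ == 4: the unsigned range below becomes
  // [0, 0xFFFFFFF0 + 1), i.e. the largest value is the largest multiple of
  // 16 that fits in 32 bits.)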
5563 uint32_t TZ = GetMinTrailingZeros(S); 5564 if (TZ != 0) { 5565 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5566 ConservativeResult = 5567 ConstantRange(APInt::getMinValue(BitWidth), 5568 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5569 else 5570 ConservativeResult = ConstantRange( 5571 APInt::getSignedMinValue(BitWidth), 5572 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5573 } 5574 5575 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5576 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5577 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5578 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5579 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5580 } 5581 5582 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5583 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5584 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5585 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5586 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5587 } 5588 5589 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5590 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5591 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5592 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5593 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5594 } 5595 5596 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5597 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5598 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5599 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5600 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5601 } 5602 5603 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5604 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5605 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5606 return setRange(UDiv, SignHint, 5607 ConservativeResult.intersectWith(X.udiv(Y))); 5608 } 5609 5610 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5611 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5612 return setRange(ZExt, SignHint, 5613 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5614 } 5615 5616 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5617 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5618 return setRange(SExt, SignHint, 5619 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5620 } 5621 5622 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5623 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5624 return setRange(Trunc, SignHint, 5625 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5626 } 5627 5628 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5629 // If there's no unsigned wrap, the value will never be less than its 5630 // initial value. 5631 if (AddRec->hasNoUnsignedWrap()) 5632 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5633 if (!C->getValue()->isZero()) 5634 ConservativeResult = ConservativeResult.intersectWith( 5635 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5636 5637 // If there's no signed wrap, and all the operands have the same sign or 5638 // zero, the value won't ever change sign. 
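    // (For example, {1,+,2}<nsw> has only non-negative operands, so its
    // value stays within [0, signed-max] and the range is narrowed below
    // accordingly.)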
5639 if (AddRec->hasNoSignedWrap()) { 5640 bool AllNonNeg = true; 5641 bool AllNonPos = true; 5642 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5643 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5644 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5645 } 5646 if (AllNonNeg) 5647 ConservativeResult = ConservativeResult.intersectWith( 5648 ConstantRange(APInt(BitWidth, 0), 5649 APInt::getSignedMinValue(BitWidth))); 5650 else if (AllNonPos) 5651 ConservativeResult = ConservativeResult.intersectWith( 5652 ConstantRange(APInt::getSignedMinValue(BitWidth), 5653 APInt(BitWidth, 1))); 5654 } 5655 5656 // TODO: non-affine addrec 5657 if (AddRec->isAffine()) { 5658 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5659 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5660 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5661 auto RangeFromAffine = getRangeForAffineAR( 5662 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5663 BitWidth); 5664 if (!RangeFromAffine.isFullSet()) 5665 ConservativeResult = 5666 ConservativeResult.intersectWith(RangeFromAffine); 5667 5668 auto RangeFromFactoring = getRangeViaFactoring( 5669 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5670 BitWidth); 5671 if (!RangeFromFactoring.isFullSet()) 5672 ConservativeResult = 5673 ConservativeResult.intersectWith(RangeFromFactoring); 5674 } 5675 } 5676 5677 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5678 } 5679 5680 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5681 // Check if the IR explicitly contains !range metadata. 5682 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5683 if (MDRange.hasValue()) 5684 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5685 5686 // Split here to avoid paying the compile-time cost of calling both 5687 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5688 // if needed. 5689 const DataLayout &DL = getDataLayout(); 5690 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5691 // For a SCEVUnknown, ask ValueTracking. 5692 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5693 if (Known.One != ~Known.Zero + 1) 5694 ConservativeResult = 5695 ConservativeResult.intersectWith(ConstantRange(Known.One, 5696 ~Known.Zero + 1)); 5697 } else { 5698 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5699 "generalize as needed!"); 5700 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5701 if (NS > 1) 5702 ConservativeResult = ConservativeResult.intersectWith( 5703 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5704 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5705 } 5706 5707 // A range of Phi is a subset of union of all ranges of its input. 5708 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5709 // Make sure that we do not run over cycled Phis. 5710 if (PendingPhiRanges.insert(Phi).second) { 5711 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5712 for (auto &Op : Phi->operands()) { 5713 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5714 RangeFromOps = RangeFromOps.unionWith(OpRange); 5715 // No point to continue if we already have a full set. 
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if the total change is more than the full span of BitWidth. If it
  // is, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
5793 if (NewLower == NewUpper) 5794 return ConstantRange(BitWidth, /* isFullSet = */ true); 5795 5796 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5797 return ConstantRange(std::move(NewLower), std::move(NewUpper)); 5798 } 5799 5800 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5801 const SCEV *Step, 5802 const SCEV *MaxBECount, 5803 unsigned BitWidth) { 5804 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5805 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5806 "Precondition!"); 5807 5808 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5809 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5810 5811 // First, consider step signed. 5812 ConstantRange StartSRange = getSignedRange(Start); 5813 ConstantRange StepSRange = getSignedRange(Step); 5814 5815 // If Step can be both positive and negative, we need to find ranges for the 5816 // maximum absolute step values in both directions and union them. 5817 ConstantRange SR = 5818 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5819 MaxBECountValue, BitWidth, /* Signed = */ true); 5820 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5821 StartSRange, MaxBECountValue, 5822 BitWidth, /* Signed = */ true)); 5823 5824 // Next, consider step unsigned. 5825 ConstantRange UR = getRangeForAffineARHelper( 5826 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5827 MaxBECountValue, BitWidth, /* Signed = */ false); 5828 5829 // Finally, intersect signed and unsigned ranges. 5830 return SR.intersectWith(UR); 5831 } 5832 5833 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5834 const SCEV *Step, 5835 const SCEV *MaxBECount, 5836 unsigned BitWidth) { 5837 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5838 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5839 5840 struct SelectPattern { 5841 Value *Condition = nullptr; 5842 APInt TrueValue; 5843 APInt FalseValue; 5844 5845 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5846 const SCEV *S) { 5847 Optional<unsigned> CastOp; 5848 APInt Offset(BitWidth, 0); 5849 5850 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5851 "Should be!"); 5852 5853 // Peel off a constant offset: 5854 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5855 // In the future we could consider being smarter here and handle 5856 // {Start+Step,+,Step} too. 
5857 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5858 return; 5859 5860 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5861 S = SA->getOperand(1); 5862 } 5863 5864 // Peel off a cast operation 5865 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5866 CastOp = SCast->getSCEVType(); 5867 S = SCast->getOperand(); 5868 } 5869 5870 using namespace llvm::PatternMatch; 5871 5872 auto *SU = dyn_cast<SCEVUnknown>(S); 5873 const APInt *TrueVal, *FalseVal; 5874 if (!SU || 5875 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5876 m_APInt(FalseVal)))) { 5877 Condition = nullptr; 5878 return; 5879 } 5880 5881 TrueValue = *TrueVal; 5882 FalseValue = *FalseVal; 5883 5884 // Re-apply the cast we peeled off earlier 5885 if (CastOp.hasValue()) 5886 switch (*CastOp) { 5887 default: 5888 llvm_unreachable("Unknown SCEV cast type!"); 5889 5890 case scTruncate: 5891 TrueValue = TrueValue.trunc(BitWidth); 5892 FalseValue = FalseValue.trunc(BitWidth); 5893 break; 5894 case scZeroExtend: 5895 TrueValue = TrueValue.zext(BitWidth); 5896 FalseValue = FalseValue.zext(BitWidth); 5897 break; 5898 case scSignExtend: 5899 TrueValue = TrueValue.sext(BitWidth); 5900 FalseValue = FalseValue.sext(BitWidth); 5901 break; 5902 } 5903 5904 // Re-apply the constant offset we peeled off earlier 5905 TrueValue += Offset; 5906 FalseValue += Offset; 5907 } 5908 5909 bool isRecognized() { return Condition != nullptr; } 5910 }; 5911 5912 SelectPattern StartPattern(*this, BitWidth, Start); 5913 if (!StartPattern.isRecognized()) 5914 return ConstantRange(BitWidth, /* isFullSet = */ true); 5915 5916 SelectPattern StepPattern(*this, BitWidth, Step); 5917 if (!StepPattern.isRecognized()) 5918 return ConstantRange(BitWidth, /* isFullSet = */ true); 5919 5920 if (StartPattern.Condition != StepPattern.Condition) { 5921 // We don't handle this case today; but we could, by considering four 5922 // possibilities below instead of two. I'm not sure if there are cases where 5923 // that will help over what getRange already does, though. 5924 return ConstantRange(BitWidth, /* isFullSet = */ true); 5925 } 5926 5927 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5928 // construct arbitrary general SCEV expressions here. This function is called 5929 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5930 // say) can end up caching a suboptimal value. 5931 5932 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5933 // C2352 and C2512 (otherwise it isn't needed). 5934 5935 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5936 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5937 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5938 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5939 5940 ConstantRange TrueRange = 5941 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5942 ConstantRange FalseRange = 5943 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5944 5945 return TrueRange.unionWith(FalseRange); 5946 } 5947 5948 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5949 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5950 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5951 5952 // Return early if there are no flags to propagate to the SCEV. 
5953 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5954 if (BinOp->hasNoUnsignedWrap()) 5955 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5956 if (BinOp->hasNoSignedWrap()) 5957 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5958 if (Flags == SCEV::FlagAnyWrap) 5959 return SCEV::FlagAnyWrap; 5960 5961 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5962 } 5963 5964 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5965 // Here we check that I is in the header of the innermost loop containing I, 5966 // since we only deal with instructions in the loop header. The actual loop we 5967 // need to check later will come from an add recurrence, but getting that 5968 // requires computing the SCEV of the operands, which can be expensive. This 5969 // check we can do cheaply to rule out some cases early. 5970 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5971 if (InnermostContainingLoop == nullptr || 5972 InnermostContainingLoop->getHeader() != I->getParent()) 5973 return false; 5974 5975 // Only proceed if we can prove that I does not yield poison. 5976 if (!programUndefinedIfFullPoison(I)) 5977 return false; 5978 5979 // At this point we know that if I is executed, then it does not wrap 5980 // according to at least one of NSW or NUW. If I is not executed, then we do 5981 // not know if the calculation that I represents would wrap. Multiple 5982 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5983 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5984 // derived from other instructions that map to the same SCEV. We cannot make 5985 // that guarantee for cases where I is not executed. So we need to find the 5986 // loop that I is considered in relation to and prove that I is executed for 5987 // every iteration of that loop. That implies that the value that I 5988 // calculates does not wrap anywhere in the loop, so then we can apply the 5989 // flags to the SCEV. 5990 // 5991 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5992 // from different loops, so that we know which loop to prove that I is 5993 // executed in. 5994 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5995 // I could be an extractvalue from a call to an overflow intrinsic. 5996 // TODO: We can do better here in some cases. 5997 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 5998 return false; 5999 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6000 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6001 bool AllOtherOpsLoopInvariant = true; 6002 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6003 ++OtherOpIndex) { 6004 if (OtherOpIndex != OpIndex) { 6005 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6006 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6007 AllOtherOpsLoopInvariant = false; 6008 break; 6009 } 6010 } 6011 } 6012 if (AllOtherOpsLoopInvariant && 6013 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6014 return true; 6015 } 6016 } 6017 return false; 6018 } 6019 6020 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6021 // If we know that \c I can never be poison period, then that's enough. 
6022   if (isSCEVExprNeverPoison(I))
6023     return true;
6024 
6025   // For an add recurrence specifically, we assume that infinite loops without
6026   // side effects are undefined behavior, and then reason as follows:
6027   //
6028   // If the add recurrence is poison in any iteration, it is poison on all
6029   // future iterations (since incrementing poison yields poison). If the result
6030   // of the add recurrence is fed into the loop latch condition and the loop
6031   // does not contain any throws or exiting blocks other than the latch, we now
6032   // have the ability to "choose" whether the backedge is taken or not (by
6033   // choosing a sufficiently evil value for the poison feeding into the branch)
6034   // for every iteration including and after the one in which \p I first became
6035   // poison. There are two possibilities (let K be the iteration in which
6036   // \p I first becomes poison):
6037   //
6038   // 1. In the set of iterations including and after K, the loop body executes
6039   //    no side effects. In this case executing the backedge an infinite number
6040   //    of times will yield undefined behavior.
6041   //
6042   // 2. In the set of iterations including and after K, the loop body executes
6043   //    at least one side effect. In this case, that specific instance of side
6044   //    effect is control dependent on poison, which also yields undefined
6045   //    behavior.
6046 
6047   auto *ExitingBB = L->getExitingBlock();
6048   auto *LatchBB = L->getLoopLatch();
6049   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6050     return false;
6051 
6052   SmallPtrSet<const Instruction *, 16> Pushed;
6053   SmallVector<const Instruction *, 8> PoisonStack;
6054 
6055   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6056   // things that are known to be fully poison under that assumption go on the
6057   // PoisonStack.
6058   Pushed.insert(I);
6059   PoisonStack.push_back(I);
6060 
6061   bool LatchControlDependentOnPoison = false;
6062   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6063     const Instruction *Poison = PoisonStack.pop_back_val();
6064 
6065     for (auto *PoisonUser : Poison->users()) {
6066       if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
6067         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6068           PoisonStack.push_back(cast<Instruction>(PoisonUser));
6069       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6070         assert(BI->isConditional() && "Only possibility!");
6071         if (BI->getParent() == LatchBB) {
6072           LatchControlDependentOnPoison = true;
6073           break;
6074         }
6075       }
6076     }
6077   }
6078 
6079   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6080 }
6081 
6082 ScalarEvolution::LoopProperties
6083 ScalarEvolution::getLoopProperties(const Loop *L) {
6084   using LoopProperties = ScalarEvolution::LoopProperties;
6085 
6086   auto Itr = LoopPropertiesCache.find(L);
6087   if (Itr == LoopPropertiesCache.end()) {
6088     auto HasSideEffects = [](Instruction *I) {
6089       if (auto *SI = dyn_cast<StoreInst>(I))
6090         return !SI->isSimple();
6091 
6092       return I->mayHaveSideEffects();
6093     };
6094 
6095     LoopProperties LP = {/* HasNoAbnormalExits */ true,
6096                          /*HasNoSideEffects*/ true};
6097 
6098     for (auto *BB : L->getBlocks())
6099       for (auto &I : *BB) {
6100         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6101           LP.HasNoAbnormalExits = false;
6102         if (HasSideEffects(&I))
6103           LP.HasNoSideEffects = false;
6104         if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6105           break; // We're already as pessimistic as we can get.
6106 } 6107 6108 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6109 assert(InsertPair.second && "We just checked!"); 6110 Itr = InsertPair.first; 6111 } 6112 6113 return Itr->second; 6114 } 6115 6116 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6117 if (!isSCEVable(V->getType())) 6118 return getUnknown(V); 6119 6120 if (Instruction *I = dyn_cast<Instruction>(V)) { 6121 // Don't attempt to analyze instructions in blocks that aren't 6122 // reachable. Such instructions don't matter, and they aren't required 6123 // to obey basic rules for definitions dominating uses which this 6124 // analysis depends on. 6125 if (!DT.isReachableFromEntry(I->getParent())) 6126 return getUnknown(V); 6127 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6128 return getConstant(CI); 6129 else if (isa<ConstantPointerNull>(V)) 6130 return getZero(V->getType()); 6131 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6132 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6133 else if (!isa<ConstantExpr>(V)) 6134 return getUnknown(V); 6135 6136 Operator *U = cast<Operator>(V); 6137 if (auto BO = MatchBinaryOp(U, DT)) { 6138 switch (BO->Opcode) { 6139 case Instruction::Add: { 6140 // The simple thing to do would be to just call getSCEV on both operands 6141 // and call getAddExpr with the result. However if we're looking at a 6142 // bunch of things all added together, this can be quite inefficient, 6143 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6144 // Instead, gather up all the operands and make a single getAddExpr call. 6145 // LLVM IR canonical form means we need only traverse the left operands. 6146 SmallVector<const SCEV *, 4> AddOps; 6147 do { 6148 if (BO->Op) { 6149 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6150 AddOps.push_back(OpSCEV); 6151 break; 6152 } 6153 6154 // If a NUW or NSW flag can be applied to the SCEV for this 6155 // addition, then compute the SCEV for this addition by itself 6156 // with a separate call to getAddExpr. We need to do that 6157 // instead of pushing the operands of the addition onto AddOps, 6158 // since the flags are only known to apply to this particular 6159 // addition - they may not apply to other additions that can be 6160 // formed with operands from AddOps. 
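          // A hypothetical instance of the situation described above:
          //   %t = add nsw i32 %a, %b
          //   %u = add i32 %t, %c
          // While gathering operands for %u we reach %t, which carries NSW,
          // so (%a + %b)<nsw> is built with its own getAddExpr call below
          // rather than flattening %a, %b and %c into one flag-less addition.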
6161 const SCEV *RHS = getSCEV(BO->RHS); 6162 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6163 if (Flags != SCEV::FlagAnyWrap) { 6164 const SCEV *LHS = getSCEV(BO->LHS); 6165 if (BO->Opcode == Instruction::Sub) 6166 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6167 else 6168 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6169 break; 6170 } 6171 } 6172 6173 if (BO->Opcode == Instruction::Sub) 6174 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6175 else 6176 AddOps.push_back(getSCEV(BO->RHS)); 6177 6178 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6179 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6180 NewBO->Opcode != Instruction::Sub)) { 6181 AddOps.push_back(getSCEV(BO->LHS)); 6182 break; 6183 } 6184 BO = NewBO; 6185 } while (true); 6186 6187 return getAddExpr(AddOps); 6188 } 6189 6190 case Instruction::Mul: { 6191 SmallVector<const SCEV *, 4> MulOps; 6192 do { 6193 if (BO->Op) { 6194 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6195 MulOps.push_back(OpSCEV); 6196 break; 6197 } 6198 6199 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6200 if (Flags != SCEV::FlagAnyWrap) { 6201 MulOps.push_back( 6202 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6203 break; 6204 } 6205 } 6206 6207 MulOps.push_back(getSCEV(BO->RHS)); 6208 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6209 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6210 MulOps.push_back(getSCEV(BO->LHS)); 6211 break; 6212 } 6213 BO = NewBO; 6214 } while (true); 6215 6216 return getMulExpr(MulOps); 6217 } 6218 case Instruction::UDiv: 6219 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6220 case Instruction::URem: 6221 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6222 case Instruction::Sub: { 6223 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6224 if (BO->Op) 6225 Flags = getNoWrapFlagsFromUB(BO->Op); 6226 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6227 } 6228 case Instruction::And: 6229 // For an expression like x&255 that merely masks off the high bits, 6230 // use zext(trunc(x)) as the SCEV expression. 6231 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6232 if (CI->isZero()) 6233 return getSCEV(BO->RHS); 6234 if (CI->isMinusOne()) 6235 return getSCEV(BO->LHS); 6236 const APInt &A = CI->getValue(); 6237 6238 // Instcombine's ShrinkDemandedConstant may strip bits out of 6239 // constants, obscuring what would otherwise be a low-bits mask. 6240 // Use computeKnownBits to compute what ShrinkDemandedConstant 6241 // knew about to reconstruct a low-bits mask value. 6242 unsigned LZ = A.countLeadingZeros(); 6243 unsigned TZ = A.countTrailingZeros(); 6244 unsigned BitWidth = A.getBitWidth(); 6245 KnownBits Known(BitWidth); 6246 computeKnownBits(BO->LHS, Known, getDataLayout(), 6247 0, &AC, nullptr, &DT); 6248 6249 APInt EffectiveMask = 6250 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6251 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6252 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6253 const SCEV *LHS = getSCEV(BO->LHS); 6254 const SCEV *ShiftedLHS = nullptr; 6255 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6256 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6257 // For an expression like (x * 8) & 8, simplify the multiply. 
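            // Worked instance of the line above (illustrative, for an i32 x):
            // with A = 8 we get TZ = 3 and MulZeros = 3, so the multiplier's
            // trailing zeros cancel against the mask's shift, ShiftedLHS
            // becomes plain x, and the whole expression folds to
            //   (zext (trunc x to i1) to i32) * 8.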
6258 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6259 unsigned GCD = std::min(MulZeros, TZ); 6260 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6261 SmallVector<const SCEV*, 4> MulOps; 6262 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6263 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6264 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6265 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6266 } 6267 } 6268 if (!ShiftedLHS) 6269 ShiftedLHS = getUDivExpr(LHS, MulCount); 6270 return getMulExpr( 6271 getZeroExtendExpr( 6272 getTruncateExpr(ShiftedLHS, 6273 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6274 BO->LHS->getType()), 6275 MulCount); 6276 } 6277 } 6278 break; 6279 6280 case Instruction::Or: 6281 // If the RHS of the Or is a constant, we may have something like: 6282 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6283 // optimizations will transparently handle this case. 6284 // 6285 // In order for this transformation to be safe, the LHS must be of the 6286 // form X*(2^n) and the Or constant must be less than 2^n. 6287 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6288 const SCEV *LHS = getSCEV(BO->LHS); 6289 const APInt &CIVal = CI->getValue(); 6290 if (GetMinTrailingZeros(LHS) >= 6291 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6292 // Build a plain add SCEV. 6293 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6294 // If the LHS of the add was an addrec and it has no-wrap flags, 6295 // transfer the no-wrap flags, since an or won't introduce a wrap. 6296 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6297 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6298 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6299 OldAR->getNoWrapFlags()); 6300 } 6301 return S; 6302 } 6303 } 6304 break; 6305 6306 case Instruction::Xor: 6307 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6308 // If the RHS of xor is -1, then this is a not operation. 6309 if (CI->isMinusOne()) 6310 return getNotSCEV(getSCEV(BO->LHS)); 6311 6312 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6313 // This is a variant of the check for xor with -1, and it handles 6314 // the case where instcombine has trimmed non-demanded bits out 6315 // of an xor with -1. 6316 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6317 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6318 if (LBO->getOpcode() == Instruction::And && 6319 LCI->getValue() == CI->getValue()) 6320 if (const SCEVZeroExtendExpr *Z = 6321 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6322 Type *UTy = BO->LHS->getType(); 6323 const SCEV *Z0 = Z->getOperand(); 6324 Type *Z0Ty = Z0->getType(); 6325 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6326 6327 // If C is a low-bits mask, the zero extend is serving to 6328 // mask off the high bits. Complement the operand and 6329 // re-apply the zext. 6330 if (CI->getValue().isMask(Z0TySize)) 6331 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6332 6333 // If C is a single bit, it may be in the sign-bit position 6334 // before the zero-extend. In this case, represent the xor 6335 // using an add, which is equivalent, and re-apply the zext. 
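                // Why the add is equivalent (illustrative note): in iN
                // arithmetic, xor with the sign mask (1 << (N-1)) and add of
                // that same mask both just flip the top bit -- the add's
                // carry falls off the end -- so e.g. for i8, `xor %v, 128`
                // and `add %v, 128` agree for every %v.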
6336                 APInt Trunc = CI->getValue().trunc(Z0TySize);
6337                 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6338                     Trunc.isSignMask())
6339                   return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6340                                            UTy);
6341               }
6342     }
6343     break;
6344 
6345   case Instruction::Shl:
6346     // Turn shift left of a constant amount into a multiply.
6347     if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6348       uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6349 
6350       // If the shift count is not less than the bitwidth, the result of
6351       // the shift is undefined. Don't try to analyze it, because the
6352       // resolution chosen here may differ from the resolution chosen in
6353       // other parts of the compiler.
6354       if (SA->getValue().uge(BitWidth))
6355         break;
6356 
6357       // It is currently not resolved how to interpret NSW for left
6358       // shift by BitWidth - 1, so we avoid applying flags in that
6359       // case. Remove this check (or this comment) once the situation
6360       // is resolved. See
6361       // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
6362       // and http://reviews.llvm.org/D8890 .
6363       auto Flags = SCEV::FlagAnyWrap;
6364       if (BO->Op && SA->getValue().ult(BitWidth - 1))
6365         Flags = getNoWrapFlagsFromUB(BO->Op);
6366 
6367       Constant *X = ConstantInt::get(
6368           getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6369       return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6370     }
6371     break;
6372 
6373   case Instruction::AShr: {
6374     // AShr X, C, where C is a constant.
6375     ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6376     if (!CI)
6377       break;
6378 
6379     Type *OuterTy = BO->LHS->getType();
6380     uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6381     // If the shift count is not less than the bitwidth, the result of
6382     // the shift is undefined. Don't try to analyze it, because the
6383     // resolution chosen here may differ from the resolution chosen in
6384     // other parts of the compiler.
6385     if (CI->getValue().uge(BitWidth))
6386       break;
6387 
6388     if (CI->isZero())
6389       return getSCEV(BO->LHS); // shift by zero --> noop
6390 
6391     uint64_t AShrAmt = CI->getZExtValue();
6392     Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6393 
6394     Operator *L = dyn_cast<Operator>(BO->LHS);
6395     if (L && L->getOpcode() == Instruction::Shl) {
6396       // X = Shl A, n
6397       // Y = AShr X, m
6398       // Both n and m are constant.
6399 
6400       const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6401       if (L->getOperand(1) == BO->RHS)
6402         // For a two-shift sext-inreg, i.e. n = m,
6403         // use sext(trunc(x)) as the SCEV expression.
6404         return getSignExtendExpr(
6405             getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6406 
6407       ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6408       if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6409         uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6410         if (ShlAmt > AShrAmt) {
6411           // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6412           // expression. We already checked that ShlAmt < BitWidth, so
6413           // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
6414           // ShlAmt - AShrAmt < BitWidth - AShrAmt.
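          // For illustration, with hypothetical amounts n = ShlAmt = 5 and
          // m = AShrAmt = 3 on an i32 x: TruncTy is i29, Mul below is 4, and
          // the result is sext(trunc(x to i29) * 4) back to i32.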
6415           APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6416                                           ShlAmt - AShrAmt);
6417           return getSignExtendExpr(
6418               getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6419               getConstant(Mul)), OuterTy);
6420         }
6421       }
6422     }
6423     break;
6424   }
6425   }
6426   }
6427 
6428   switch (U->getOpcode()) {
6429   case Instruction::Trunc:
6430     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6431 
6432   case Instruction::ZExt:
6433     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6434 
6435   case Instruction::SExt:
6436     if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6437       // The NSW flag of a subtract does not always survive the conversion to
6438       // A + (-1)*B. By pushing sign extension onto its operands we are much
6439       // more likely to preserve NSW and allow later AddRec optimisations.
6440       //
6441       // NOTE: This is effectively duplicating this logic from getSignExtend:
6442       //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6443       // but by that point the NSW information has potentially been lost.
6444       if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6445         Type *Ty = U->getType();
6446         auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6447         auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6448         return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6449       }
6450     }
6451     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6452 
6453   case Instruction::BitCast:
6454     // BitCasts are no-op casts so we just eliminate the cast.
6455     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6456       return getSCEV(U->getOperand(0));
6457     break;
6458 
6459   // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
6460   // lead to pointer expressions which cannot safely be expanded to GEPs,
6461   // because ScalarEvolution doesn't respect the GEP aliasing rules when
6462   // simplifying integer expressions.
6463 
6464   case Instruction::GetElementPtr:
6465     return createNodeForGEP(cast<GEPOperator>(U));
6466 
6467   case Instruction::PHI:
6468     return createNodeForPHI(cast<PHINode>(U));
6469 
6470   case Instruction::Select:
6471     // U can also be a select constant expr, which we let fall through. Since
6472     // createNodeForSelect only works for a condition that is an `ICmpInst`, and
6473     // constant expressions cannot have instructions as operands, we'd have
6474     // returned getUnknown for a select constant expression anyway.
6475     if (isa<Instruction>(U))
6476       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6477                                       U->getOperand(1), U->getOperand(2));
6478     break;
6479 
6480   case Instruction::Call:
6481   case Instruction::Invoke:
6482     if (Value *RV = CallSite(U).getReturnedArgOperand())
6483       return getSCEV(RV);
6484     break;
6485   }
6486 
6487   return getUnknown(V);
6488 }
6489 
6490 //===----------------------------------------------------------------------===//
6491 //                   Iteration Count Computation Code
6492 //
6493 
6494 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6495   if (!ExitCount)
6496     return 0;
6497 
6498   ConstantInt *ExitConst = ExitCount->getValue();
6499 
6500   // Guard against huge trip counts.
6501   if (ExitConst->getValue().getActiveBits() > 32)
6502     return 0;
6503 
6504   // In case of integer overflow, this returns 0, which is correct.
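  // For example, a backedge-taken count of 9 yields a trip count of 10, while
  // an i32 backedge-taken count of -1 (all ones) wraps around to 0 here,
  // i.e. "could not compute".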
6505   return ((unsigned)ExitConst->getZExtValue()) + 1;
6506 }
6507 
6508 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6509   if (BasicBlock *ExitingBB = L->getExitingBlock())
6510     return getSmallConstantTripCount(L, ExitingBB);
6511 
6512   // No trip count information for multiple exits.
6513   return 0;
6514 }
6515 
6516 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6517                                                     BasicBlock *ExitingBlock) {
6518   assert(ExitingBlock && "Must pass a non-null exiting block!");
6519   assert(L->isLoopExiting(ExitingBlock) &&
6520          "Exiting block must actually branch out of the loop!");
6521   const SCEVConstant *ExitCount =
6522       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6523   return getConstantTripCount(ExitCount);
6524 }
6525 
6526 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6527   const auto *MaxExitCount =
6528       dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
6529   return getConstantTripCount(MaxExitCount);
6530 }
6531 
6532 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6533   if (BasicBlock *ExitingBB = L->getExitingBlock())
6534     return getSmallConstantTripMultiple(L, ExitingBB);
6535 
6536   // No trip multiple information for multiple exits.
6537   return 0;
6538 }
6539 
6540 /// Returns the largest constant divisor of the trip count of this loop as a
6541 /// normal unsigned value, if possible. This means that the actual trip count is
6542 /// always a multiple of the returned value (don't forget the trip count could
6543 /// very well be zero as well!).
6544 ///
6545 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
6546 /// of a constant (which is also the case if the trip count is simply a
6547 /// constant; use getSmallConstantTripCount for that case). It will also return
6548 /// 1 if the trip count is very large (>= 2^32).
6549 ///
6550 /// As explained in the comments for getSmallConstantTripCount, this assumes
6551 /// that control exits the loop via ExitingBlock.
6552 unsigned
6553 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6554                                               BasicBlock *ExitingBlock) {
6555   assert(ExitingBlock && "Must pass a non-null exiting block!");
6556   assert(L->isLoopExiting(ExitingBlock) &&
6557          "Exiting block must actually branch out of the loop!");
6558   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6559   if (ExitCount == getCouldNotCompute())
6560     return 1;
6561 
6562   // Get the trip count from the BE count by adding 1.
6563   const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6564 
6565   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
6566   if (!TC)
6567     // Attempt to factor more general cases. Returns the greatest power of
6568     // two divisor. If overflow happens, the trip count expression is still
6569     // divisible by the greatest power of 2 divisor returned.
6570     return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6571 
6572   ConstantInt *Result = TC->getValue();
6573 
6574   // Guard against huge trip counts (this requires checking
6575   // for zero to handle the case where the trip count == -1 and the
6576   // addition wraps).
6577   if (!Result || Result->getValue().getActiveBits() > 32 ||
6578       Result->getValue().getActiveBits() == 0)
6579     return 1;
6580 
6581   return (unsigned)Result->getZExtValue();
6582 }
6583 
6584 /// Get the expression for the number of loop iterations for which this loop is
6585 /// guaranteed not to exit via ExitingBlock. Otherwise return
6586 /// SCEVCouldNotCompute.
6587 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
6588                                           BasicBlock *ExitingBlock) {
6589   return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
6590 }
6591 
6592 const SCEV *
6593 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
6594                                                  SCEVUnionPredicate &Preds) {
6595   return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
6596 }
6597 
6598 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
6599   return getBackedgeTakenInfo(L).getExact(L, this);
6600 }
6601 
6602 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is
6603 /// known never to be less than the actual backedge taken count.
6604 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
6605   return getBackedgeTakenInfo(L).getMax(this);
6606 }
6607 
6608 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
6609   return getBackedgeTakenInfo(L).isMaxOrZero(this);
6610 }
6611 
6612 /// Push PHI nodes in the header of the given loop onto the given Worklist.
6613 static void
6614 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
6615   BasicBlock *Header = L->getHeader();
6616 
6617   // Push all Loop-header PHIs onto the Worklist stack.
6618   for (PHINode &PN : Header->phis())
6619     Worklist.push_back(&PN);
6620 }
6621 
6622 const ScalarEvolution::BackedgeTakenInfo &
6623 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6624   auto &BTI = getBackedgeTakenInfo(L);
6625   if (BTI.hasFullInfo())
6626     return BTI;
6627 
6628   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6629 
6630   if (!Pair.second)
6631     return Pair.first->second;
6632 
6633   BackedgeTakenInfo Result =
6634       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6635 
6636   return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6637 }
6638 
6639 const ScalarEvolution::BackedgeTakenInfo &
6640 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6641   // Initially insert an invalid entry for this loop. If the insertion
6642   // succeeds, proceed to actually compute a backedge-taken count and
6643   // update the value. The temporary CouldNotCompute value tells SCEV
6644   // code elsewhere that it shouldn't attempt to request a new
6645   // backedge-taken count, which could result in infinite recursion.
6646   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6647       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6648   if (!Pair.second)
6649     return Pair.first->second;
6650 
6651   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6652   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6653   // must be cleared in this scope.
6654   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6655 
6656   // Statistics are compiled out of product builds; the casts avoid
6657   // unused-variable warnings there.
6657   (void)NumTripCountsComputed;
6658   (void)NumTripCountsNotComputed;
6659 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6660   const SCEV *BEExact = Result.getExact(L, this);
6661   if (BEExact != getCouldNotCompute()) {
6662     assert(isLoopInvariant(BEExact, L) &&
6663            isLoopInvariant(Result.getMax(this), L) &&
6664            "Computed backedge-taken count isn't loop invariant for loop!");
6665     ++NumTripCountsComputed;
6666   }
6667   else if (Result.getMax(this) == getCouldNotCompute() &&
6668            isa<PHINode>(L->getHeader()->begin())) {
6669     // Only count loops that have phi nodes as not being computable.
6670     ++NumTripCountsNotComputed;
6671   }
6672 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6673 
6674   // Now that we know more about the trip count for this loop, forget any
6675   // existing SCEV values for PHI nodes in this loop since they are only
6676   // conservative estimates made without the benefit of trip count
6677   // information. This is similar to the code in forgetLoop, except that
6678   // it handles SCEVUnknown PHI nodes specially.
6679   if (Result.hasAnyInfo()) {
6680     SmallVector<Instruction *, 16> Worklist;
6681     PushLoopPHIs(L, Worklist);
6682 
6683     SmallPtrSet<Instruction *, 8> Discovered;
6684     while (!Worklist.empty()) {
6685       Instruction *I = Worklist.pop_back_val();
6686 
6687       ValueExprMapType::iterator It =
6688           ValueExprMap.find_as(static_cast<Value *>(I));
6689       if (It != ValueExprMap.end()) {
6690         const SCEV *Old = It->second;
6691 
6692         // SCEVUnknown for a PHI either means that it has an unrecognized
6693         // structure, or it's a PHI that's in the process of being computed
6694         // by createNodeForPHI. In the former case, additional loop trip
6695         // count information isn't going to change anything. In the latter
6696         // case, createNodeForPHI will perform the necessary updates on its
6697         // own when it gets to that point.
6698         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6699           eraseValueFromMap(It->first);
6700           forgetMemoizedResults(Old);
6701         }
6702         if (PHINode *PN = dyn_cast<PHINode>(I))
6703           ConstantEvolutionLoopExitValue.erase(PN);
6704       }
6705 
6706       // Since we don't need to invalidate anything for correctness and we're
6707       // only invalidating to make SCEV's results more precise, we get to stop
6708       // early to avoid invalidating too much. This is especially important in
6709       // cases like:
6710       //
6711       //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6712       //   loop0:
6713       //     %pn0 = phi
6714       //     ...
6715       //   loop1:
6716       //     %pn1 = phi
6717       //     ...
6718       //
6719       // where both loop0's and loop1's backedge-taken counts use the SCEV
6720       // expression for %v. If we don't have the early stop below then in cases
6721       // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6722       // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6723       // count for loop1, effectively nullifying SCEV's trip count cache.
6724       for (auto *U : I->users())
6725         if (auto *I = dyn_cast<Instruction>(U)) {
6726           auto *LoopForUser = LI.getLoopFor(I->getParent());
6727           if (LoopForUser && L->contains(LoopForUser) &&
6728               Discovered.insert(I).second)
6729             Worklist.push_back(I);
6730         }
6731     }
6732   }
6733 
6734   // Re-lookup the insert position, since the call to
6735   // computeBackedgeTakenCount above could result in a
6736   // recursive call to getBackedgeTakenInfo (on a different
6737   // loop), which would invalidate the iterator computed
6738   // earlier.
6739   return BackedgeTakenCounts.find(L)->second = std::move(Result);
6740 }
6741 
6742 void ScalarEvolution::forgetLoop(const Loop *L) {
6743   // Drop any stored trip count value.
6744   auto RemoveLoopFromBackedgeMap =
6745       [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
6746         auto BTCPos = Map.find(L);
6747         if (BTCPos != Map.end()) {
6748           BTCPos->second.clear();
6749           Map.erase(BTCPos);
6750         }
6751       };
6752 
6753   SmallVector<const Loop *, 16> LoopWorklist(1, L);
6754   SmallVector<Instruction *, 32> Worklist;
6755   SmallPtrSet<Instruction *, 16> Visited;
6756 
6757   // Iterate over all the loops and sub-loops to drop SCEV information.
6758 while (!LoopWorklist.empty()) { 6759 auto *CurrL = LoopWorklist.pop_back_val(); 6760 6761 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6762 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6763 6764 // Drop information about predicated SCEV rewrites for this loop. 6765 for (auto I = PredicatedSCEVRewrites.begin(); 6766 I != PredicatedSCEVRewrites.end();) { 6767 std::pair<const SCEV *, const Loop *> Entry = I->first; 6768 if (Entry.second == CurrL) 6769 PredicatedSCEVRewrites.erase(I++); 6770 else 6771 ++I; 6772 } 6773 6774 auto LoopUsersItr = LoopUsers.find(CurrL); 6775 if (LoopUsersItr != LoopUsers.end()) { 6776 for (auto *S : LoopUsersItr->second) 6777 forgetMemoizedResults(S); 6778 LoopUsers.erase(LoopUsersItr); 6779 } 6780 6781 // Drop information about expressions based on loop-header PHIs. 6782 PushLoopPHIs(CurrL, Worklist); 6783 6784 while (!Worklist.empty()) { 6785 Instruction *I = Worklist.pop_back_val(); 6786 if (!Visited.insert(I).second) 6787 continue; 6788 6789 ValueExprMapType::iterator It = 6790 ValueExprMap.find_as(static_cast<Value *>(I)); 6791 if (It != ValueExprMap.end()) { 6792 eraseValueFromMap(It->first); 6793 forgetMemoizedResults(It->second); 6794 if (PHINode *PN = dyn_cast<PHINode>(I)) 6795 ConstantEvolutionLoopExitValue.erase(PN); 6796 } 6797 6798 PushDefUseChildren(I, Worklist); 6799 } 6800 6801 LoopPropertiesCache.erase(CurrL); 6802 // Forget all contained loops too, to avoid dangling entries in the 6803 // ValuesAtScopes map. 6804 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6805 } 6806 } 6807 6808 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6809 while (Loop *Parent = L->getParentLoop()) 6810 L = Parent; 6811 forgetLoop(L); 6812 } 6813 6814 void ScalarEvolution::forgetValue(Value *V) { 6815 Instruction *I = dyn_cast<Instruction>(V); 6816 if (!I) return; 6817 6818 // Drop information about expressions based on loop-header PHIs. 6819 SmallVector<Instruction *, 16> Worklist; 6820 Worklist.push_back(I); 6821 6822 SmallPtrSet<Instruction *, 8> Visited; 6823 while (!Worklist.empty()) { 6824 I = Worklist.pop_back_val(); 6825 if (!Visited.insert(I).second) 6826 continue; 6827 6828 ValueExprMapType::iterator It = 6829 ValueExprMap.find_as(static_cast<Value *>(I)); 6830 if (It != ValueExprMap.end()) { 6831 eraseValueFromMap(It->first); 6832 forgetMemoizedResults(It->second); 6833 if (PHINode *PN = dyn_cast<PHINode>(I)) 6834 ConstantEvolutionLoopExitValue.erase(PN); 6835 } 6836 6837 PushDefUseChildren(I, Worklist); 6838 } 6839 } 6840 6841 /// Get the exact loop backedge taken count considering all loop exits. A 6842 /// computable result can only be returned for loops with all exiting blocks 6843 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6844 /// is never skipped. This is a valid assumption as long as the loop exits via 6845 /// that test. For precise results, it is the caller's responsibility to specify 6846 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6847 const SCEV * 6848 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6849 SCEVUnionPredicate *Preds) const { 6850 // If any exits were not computable, the loop is not computable. 6851 if (!isComplete() || ExitNotTaken.empty()) 6852 return SE->getCouldNotCompute(); 6853 6854 const BasicBlock *Latch = L->getLoopLatch(); 6855 // All exiting blocks we have collected must dominate the only backedge. 
6856 if (!Latch) 6857 return SE->getCouldNotCompute(); 6858 6859 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6860 // count is simply a minimum out of all these calculated exit counts. 6861 SmallVector<const SCEV *, 2> Ops; 6862 for (auto &ENT : ExitNotTaken) { 6863 const SCEV *BECount = ENT.ExactNotTaken; 6864 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6865 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6866 "We should only have known counts for exiting blocks that dominate " 6867 "latch!"); 6868 6869 Ops.push_back(BECount); 6870 6871 if (Preds && !ENT.hasAlwaysTruePredicate()) 6872 Preds->add(ENT.Predicate.get()); 6873 6874 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6875 "Predicate should be always true!"); 6876 } 6877 6878 return SE->getUMinFromMismatchedTypes(Ops); 6879 } 6880 6881 /// Get the exact not taken count for this loop exit. 6882 const SCEV * 6883 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6884 ScalarEvolution *SE) const { 6885 for (auto &ENT : ExitNotTaken) 6886 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6887 return ENT.ExactNotTaken; 6888 6889 return SE->getCouldNotCompute(); 6890 } 6891 6892 /// getMax - Get the max backedge taken count for the loop. 6893 const SCEV * 6894 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6895 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6896 return !ENT.hasAlwaysTruePredicate(); 6897 }; 6898 6899 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6900 return SE->getCouldNotCompute(); 6901 6902 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6903 "No point in having a non-constant max backedge taken count!"); 6904 return getMax(); 6905 } 6906 6907 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6908 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6909 return !ENT.hasAlwaysTruePredicate(); 6910 }; 6911 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6912 } 6913 6914 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6915 ScalarEvolution *SE) const { 6916 if (getMax() && getMax() != SE->getCouldNotCompute() && 6917 SE->hasOperand(getMax(), S)) 6918 return true; 6919 6920 for (auto &ENT : ExitNotTaken) 6921 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6922 SE->hasOperand(ENT.ExactNotTaken, S)) 6923 return true; 6924 6925 return false; 6926 } 6927 6928 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6929 : ExactNotTaken(E), MaxNotTaken(E) { 6930 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6931 isa<SCEVConstant>(MaxNotTaken)) && 6932 "No point in having a non-constant max backedge taken count!"); 6933 } 6934 6935 ScalarEvolution::ExitLimit::ExitLimit( 6936 const SCEV *E, const SCEV *M, bool MaxOrZero, 6937 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6938 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6939 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6940 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6941 "Exact is not allowed to be less precise than Max"); 6942 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6943 isa<SCEVConstant>(MaxNotTaken)) && 6944 "No point in having a non-constant max backedge taken count!"); 6945 for (auto *PredSet : PredSetList) 6946 for (auto *P : *PredSet) 6947 addPredicate(P); 6948 } 6949 6950 ScalarEvolution::ExitLimit::ExitLimit( 6951 const SCEV *E, const SCEV *M, bool 
MaxOrZero, 6952 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6953 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6954 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6955 isa<SCEVConstant>(MaxNotTaken)) && 6956 "No point in having a non-constant max backedge taken count!"); 6957 } 6958 6959 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6960 bool MaxOrZero) 6961 : ExitLimit(E, M, MaxOrZero, None) { 6962 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6963 isa<SCEVConstant>(MaxNotTaken)) && 6964 "No point in having a non-constant max backedge taken count!"); 6965 } 6966 6967 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6968 /// computable exit into a persistent ExitNotTakenInfo array. 6969 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6970 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6971 &&ExitCounts, 6972 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6973 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6974 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6975 6976 ExitNotTaken.reserve(ExitCounts.size()); 6977 std::transform( 6978 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6979 [&](const EdgeExitInfo &EEI) { 6980 BasicBlock *ExitBB = EEI.first; 6981 const ExitLimit &EL = EEI.second; 6982 if (EL.Predicates.empty()) 6983 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6984 6985 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6986 for (auto *Pred : EL.Predicates) 6987 Predicate->add(Pred); 6988 6989 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6990 }); 6991 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6992 "No point in having a non-constant max backedge taken count!"); 6993 } 6994 6995 /// Invalidate this result and free the ExitNotTakenInfo array. 6996 void ScalarEvolution::BackedgeTakenInfo::clear() { 6997 ExitNotTaken.clear(); 6998 } 6999 7000 /// Compute the number of times the backedge of the specified loop will execute. 7001 ScalarEvolution::BackedgeTakenInfo 7002 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 7003 bool AllowPredicates) { 7004 SmallVector<BasicBlock *, 8> ExitingBlocks; 7005 L->getExitingBlocks(ExitingBlocks); 7006 7007 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7008 7009 SmallVector<EdgeExitInfo, 4> ExitCounts; 7010 bool CouldComputeBECount = true; 7011 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 7012 const SCEV *MustExitMaxBECount = nullptr; 7013 const SCEV *MayExitMaxBECount = nullptr; 7014 bool MustExitMaxOrZero = false; 7015 7016 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 7017 // and compute maxBECount. 7018 // Do a union of all the predicates here. 7019 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 7020 BasicBlock *ExitBB = ExitingBlocks[i]; 7021 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 7022 7023 assert((AllowPredicates || EL.Predicates.empty()) && 7024 "Predicated exit limit when predicates are not allowed!"); 7025 7026 // 1. For each exit that can be computed, add an entry to ExitCounts. 7027 // CouldComputeBECount is true only if all exits can be computed. 7028 if (EL.ExactNotTaken == getCouldNotCompute()) 7029 // We couldn't compute an exact value for this exit, so 7030 // we won't be able to compute an exact value for the loop. 
7031 CouldComputeBECount = false; 7032 else 7033 ExitCounts.emplace_back(ExitBB, EL); 7034 7035 // 2. Derive the loop's MaxBECount from each exit's max number of 7036 // non-exiting iterations. Partition the loop exits into two kinds: 7037 // LoopMustExits and LoopMayExits. 7038 // 7039 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 7040 // is a LoopMayExit. If any computable LoopMustExit is found, then 7041 // MaxBECount is the minimum EL.MaxNotTaken of computable 7042 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7043 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7044 // computable EL.MaxNotTaken. 7045 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 7046 DT.dominates(ExitBB, Latch)) { 7047 if (!MustExitMaxBECount) { 7048 MustExitMaxBECount = EL.MaxNotTaken; 7049 MustExitMaxOrZero = EL.MaxOrZero; 7050 } else { 7051 MustExitMaxBECount = 7052 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 7053 } 7054 } else if (MayExitMaxBECount != getCouldNotCompute()) { 7055 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 7056 MayExitMaxBECount = EL.MaxNotTaken; 7057 else { 7058 MayExitMaxBECount = 7059 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 7060 } 7061 } 7062 } 7063 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 7064 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 7065 // The loop backedge will be taken the maximum or zero times if there's 7066 // a single exit that must be taken the maximum or zero times. 7067 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 7068 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 7069 MaxBECount, MaxOrZero); 7070 } 7071 7072 ScalarEvolution::ExitLimit 7073 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 7074 bool AllowPredicates) { 7075 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 7076 // If our exiting block does not dominate the latch, then its connection with 7077 // loop's exit limit may be far from trivial. 7078 const BasicBlock *Latch = L->getLoopLatch(); 7079 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 7080 return getCouldNotCompute(); 7081 7082 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7083 TerminatorInst *Term = ExitingBlock->getTerminator(); 7084 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7085 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7086 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7087 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7088 "It should have one successor in loop and one exit block!"); 7089 // Proceed to the next level to examine the exit condition expression. 7090 return computeExitLimitFromCond( 7091 L, BI->getCondition(), ExitIfTrue, 7092 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7093 } 7094 7095 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7096 // For switch, make sure that there is a single exit from the loop. 7097 BasicBlock *Exit = nullptr; 7098 for (auto *SBB : successors(ExitingBlock)) 7099 if (!L->contains(SBB)) { 7100 if (Exit) // Multiple exit successors. 
7101 return getCouldNotCompute(); 7102 Exit = SBB; 7103 } 7104 assert(Exit && "Exiting block must have at least one exit"); 7105 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7106 /*ControlsExit=*/IsOnlyExit); 7107 } 7108 7109 return getCouldNotCompute(); 7110 } 7111 7112 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7113 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7114 bool ControlsExit, bool AllowPredicates) { 7115 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7116 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7117 ControlsExit, AllowPredicates); 7118 } 7119 7120 Optional<ScalarEvolution::ExitLimit> 7121 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7122 bool ExitIfTrue, bool ControlsExit, 7123 bool AllowPredicates) { 7124 (void)this->L; 7125 (void)this->ExitIfTrue; 7126 (void)this->AllowPredicates; 7127 7128 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7129 this->AllowPredicates == AllowPredicates && 7130 "Variance in assumed invariant key components!"); 7131 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7132 if (Itr == TripCountMap.end()) 7133 return None; 7134 return Itr->second; 7135 } 7136 7137 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7138 bool ExitIfTrue, 7139 bool ControlsExit, 7140 bool AllowPredicates, 7141 const ExitLimit &EL) { 7142 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7143 this->AllowPredicates == AllowPredicates && 7144 "Variance in assumed invariant key components!"); 7145 7146 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7147 assert(InsertResult.second && "Expected successful insertion!"); 7148 (void)InsertResult; 7149 (void)ExitIfTrue; 7150 } 7151 7152 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7153 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7154 bool ControlsExit, bool AllowPredicates) { 7155 7156 if (auto MaybeEL = 7157 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7158 return *MaybeEL; 7159 7160 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7161 ControlsExit, AllowPredicates); 7162 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7163 return EL; 7164 } 7165 7166 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7167 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7168 bool ControlsExit, bool AllowPredicates) { 7169 // Check if the controlling expression for this loop is an And or Or. 7170 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7171 if (BO->getOpcode() == Instruction::And) { 7172 // Recurse on the operands of the and. 7173 bool EitherMayExit = !ExitIfTrue; 7174 ExitLimit EL0 = computeExitLimitFromCondCached( 7175 Cache, L, BO->getOperand(0), ExitIfTrue, 7176 ControlsExit && !EitherMayExit, AllowPredicates); 7177 ExitLimit EL1 = computeExitLimitFromCondCached( 7178 Cache, L, BO->getOperand(1), ExitIfTrue, 7179 ControlsExit && !EitherMayExit, AllowPredicates); 7180 const SCEV *BECount = getCouldNotCompute(); 7181 const SCEV *MaxBECount = getCouldNotCompute(); 7182 if (EitherMayExit) { 7183 // Both conditions must be true for the loop to continue executing. 7184 // Choose the less conservative count. 
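        // (Illustratively, an exit controlled by `a != 0 && b != 0`: the loop
        // leaves as soon as either half goes false, so when both counts are
        // computable the exact count is their umin, taken below.)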
7185 if (EL0.ExactNotTaken == getCouldNotCompute() || 7186 EL1.ExactNotTaken == getCouldNotCompute()) 7187 BECount = getCouldNotCompute(); 7188 else 7189 BECount = 7190 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7191 if (EL0.MaxNotTaken == getCouldNotCompute()) 7192 MaxBECount = EL1.MaxNotTaken; 7193 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7194 MaxBECount = EL0.MaxNotTaken; 7195 else 7196 MaxBECount = 7197 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7198 } else { 7199 // Both conditions must be true at the same time for the loop to exit. 7200 // For now, be conservative. 7201 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7202 MaxBECount = EL0.MaxNotTaken; 7203 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7204 BECount = EL0.ExactNotTaken; 7205 } 7206 7207 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7208 // to be more aggressive when computing BECount than when computing 7209 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7210 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7211 // to not. 7212 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7213 !isa<SCEVCouldNotCompute>(BECount)) 7214 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7215 7216 return ExitLimit(BECount, MaxBECount, false, 7217 {&EL0.Predicates, &EL1.Predicates}); 7218 } 7219 if (BO->getOpcode() == Instruction::Or) { 7220 // Recurse on the operands of the or. 7221 bool EitherMayExit = ExitIfTrue; 7222 ExitLimit EL0 = computeExitLimitFromCondCached( 7223 Cache, L, BO->getOperand(0), ExitIfTrue, 7224 ControlsExit && !EitherMayExit, AllowPredicates); 7225 ExitLimit EL1 = computeExitLimitFromCondCached( 7226 Cache, L, BO->getOperand(1), ExitIfTrue, 7227 ControlsExit && !EitherMayExit, AllowPredicates); 7228 const SCEV *BECount = getCouldNotCompute(); 7229 const SCEV *MaxBECount = getCouldNotCompute(); 7230 if (EitherMayExit) { 7231 // Both conditions must be false for the loop to continue executing. 7232 // Choose the less conservative count. 7233 if (EL0.ExactNotTaken == getCouldNotCompute() || 7234 EL1.ExactNotTaken == getCouldNotCompute()) 7235 BECount = getCouldNotCompute(); 7236 else 7237 BECount = 7238 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7239 if (EL0.MaxNotTaken == getCouldNotCompute()) 7240 MaxBECount = EL1.MaxNotTaken; 7241 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7242 MaxBECount = EL0.MaxNotTaken; 7243 else 7244 MaxBECount = 7245 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7246 } else { 7247 // Both conditions must be false at the same time for the loop to exit. 7248 // For now, be conservative. 7249 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7250 MaxBECount = EL0.MaxNotTaken; 7251 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7252 BECount = EL0.ExactNotTaken; 7253 } 7254 7255 return ExitLimit(BECount, MaxBECount, false, 7256 {&EL0.Predicates, &EL1.Predicates}); 7257 } 7258 } 7259 7260 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7261 // Proceed to the next level to examine the icmp. 7262 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7263 ExitLimit EL = 7264 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7265 if (EL.hasFullInfo() || !AllowPredicates) 7266 return EL; 7267 7268 // Try again, but use SCEV predicates this time. 
7269 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7270 /*AllowPredicates=*/true); 7271 } 7272 7273 // Check for a constant condition. These are normally stripped out by 7274 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7275 // preserve the CFG and is temporarily leaving constant conditions 7276 // in place. 7277 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7278 if (ExitIfTrue == !CI->getZExtValue()) 7279 // The backedge is always taken. 7280 return getCouldNotCompute(); 7281 else 7282 // The backedge is never taken. 7283 return getZero(CI->getType()); 7284 } 7285 7286 // If it's not an integer or pointer comparison then compute it the hard way. 7287 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7288 } 7289 7290 ScalarEvolution::ExitLimit 7291 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7292 ICmpInst *ExitCond, 7293 bool ExitIfTrue, 7294 bool ControlsExit, 7295 bool AllowPredicates) { 7296 // If the condition was exit on true, convert the condition to exit on false 7297 ICmpInst::Predicate Pred; 7298 if (!ExitIfTrue) 7299 Pred = ExitCond->getPredicate(); 7300 else 7301 Pred = ExitCond->getInversePredicate(); 7302 const ICmpInst::Predicate OriginalPred = Pred; 7303 7304 // Handle common loops like: for (X = "string"; *X; ++X) 7305 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7306 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7307 ExitLimit ItCnt = 7308 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7309 if (ItCnt.hasAnyInfo()) 7310 return ItCnt; 7311 } 7312 7313 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7314 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7315 7316 // Try to evaluate any dependencies out of the loop. 7317 LHS = getSCEVAtScope(LHS, L); 7318 RHS = getSCEVAtScope(RHS, L); 7319 7320 // At this point, we would like to compute how many iterations of the 7321 // loop the predicate will return true for these inputs. 7322 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7323 // If there is a loop-invariant, force it into the RHS. 7324 std::swap(LHS, RHS); 7325 Pred = ICmpInst::getSwappedPredicate(Pred); 7326 } 7327 7328 // Simplify the operands before analyzing them. 7329 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7330 7331 // If we have a comparison of a chrec against a constant, try to use value 7332 // ranges to answer this query. 7333 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7334 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7335 if (AddRec->getLoop() == L) { 7336 // Form the constant range. 
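        // (For instance, a comparison `{0,+,1} slt 10` maps to the signed
        // region [INT_MIN, 10); getNumIterationsInRange can then determine
        // exactly when the addrec first leaves that region.)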
7337 ConstantRange CompRange = 7338 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7339 7340 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7341 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7342 } 7343 7344 switch (Pred) { 7345 case ICmpInst::ICMP_NE: { // while (X != Y) 7346 // Convert to: while (X-Y != 0) 7347 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7348 AllowPredicates); 7349 if (EL.hasAnyInfo()) return EL; 7350 break; 7351 } 7352 case ICmpInst::ICMP_EQ: { // while (X == Y) 7353 // Convert to: while (X-Y == 0) 7354 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7355 if (EL.hasAnyInfo()) return EL; 7356 break; 7357 } 7358 case ICmpInst::ICMP_SLT: 7359 case ICmpInst::ICMP_ULT: { // while (X < Y) 7360 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7361 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7362 AllowPredicates); 7363 if (EL.hasAnyInfo()) return EL; 7364 break; 7365 } 7366 case ICmpInst::ICMP_SGT: 7367 case ICmpInst::ICMP_UGT: { // while (X > Y) 7368 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7369 ExitLimit EL = 7370 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7371 AllowPredicates); 7372 if (EL.hasAnyInfo()) return EL; 7373 break; 7374 } 7375 default: 7376 break; 7377 } 7378 7379 auto *ExhaustiveCount = 7380 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7381 7382 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7383 return ExhaustiveCount; 7384 7385 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7386 ExitCond->getOperand(1), L, OriginalPred); 7387 } 7388 7389 ScalarEvolution::ExitLimit 7390 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7391 SwitchInst *Switch, 7392 BasicBlock *ExitingBlock, 7393 bool ControlsExit) { 7394 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7395 7396 // Give up if the exit is the default dest of a switch. 7397 if (Switch->getDefaultDest() == ExitingBlock) 7398 return getCouldNotCompute(); 7399 7400 assert(L->contains(Switch->getDefaultDest()) && 7401 "Default case must not exit the loop!"); 7402 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7403 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7404 7405 // while (X != Y) --> while (X-Y != 0) 7406 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7407 if (EL.hasAnyInfo()) 7408 return EL; 7409 7410 return getCouldNotCompute(); 7411 } 7412 7413 static ConstantInt * 7414 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7415 ScalarEvolution &SE) { 7416 const SCEV *InVal = SE.getConstant(C); 7417 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7418 assert(isa<SCEVConstant>(Val) && 7419 "Evaluation of SCEV at constant didn't fold correctly?"); 7420 return cast<SCEVConstant>(Val)->getValue(); 7421 } 7422 7423 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7424 /// compute the backedge execution count. 7425 ScalarEvolution::ExitLimit 7426 ScalarEvolution::computeLoadConstantCompareExitLimit( 7427 LoadInst *LI, 7428 Constant *RHS, 7429 const Loop *L, 7430 ICmpInst::Predicate predicate) { 7431 if (LI->isVolatile()) return getCouldNotCompute(); 7432 7433 // Check to see if the loaded pointer is a getelementptr of a global. 7434 // TODO: Use SCEV instead of manually grubbing with GEPs. 
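  // Concretely, we try to match a load of the shape
  //   load (gep @GV, 0, C1, ..., X, ..., Cn)
  // where @GV is a constant global and exactly one index is loop-variant
  // (the names here are illustrative); matching iterations are then evaluated
  // one at a time below, up to MaxBruteForceIterations.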
7435   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7436   if (!GEP) return getCouldNotCompute();
7437 
7438   // Make sure that it is really a constant global we are gepping, with an
7439   // initializer, and make sure the first IDX is really 0.
7440   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7441   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7442       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7443       !cast<Constant>(GEP->getOperand(1))->isNullValue())
7444     return getCouldNotCompute();
7445 
7446   // Okay, we allow one non-constant index into the GEP instruction.
7447   Value *VarIdx = nullptr;
7448   std::vector<Constant*> Indexes;
7449   unsigned VarIdxNum = 0;
7450   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7451     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7452       Indexes.push_back(CI);
7453     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7454       if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7455       VarIdx = GEP->getOperand(i);
7456       VarIdxNum = i-2;
7457       Indexes.push_back(nullptr);
7458     }
7459 
7460   // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7461   if (!VarIdx)
7462     return getCouldNotCompute();
7463 
7464   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7465   // Check to see if X is a loop variant variable value now.
7466   const SCEV *Idx = getSCEV(VarIdx);
7467   Idx = getSCEVAtScope(Idx, L);
7468 
7469   // We can only recognize very limited forms of loop index expressions, in
7470   // particular, only affine AddRec's like {C1,+,C2}.
7471   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7472   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7473       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7474       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7475     return getCouldNotCompute();
7476 
7477   unsigned MaxSteps = MaxBruteForceIterations;
7478   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7479     ConstantInt *ItCst = ConstantInt::get(
7480         cast<IntegerType>(IdxExpr->getType()), IterationNum);
7481     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7482 
7483     // Form the GEP offset.
7484     Indexes[VarIdxNum] = Val;
7485 
7486     Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7487                                                          Indexes);
7488     if (!Result) break; // Cannot compute!
7489 
7490     // Evaluate the condition for this iteration.
7491     Result = ConstantExpr::getICmp(predicate, Result, RHS);
7492     if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7493     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7494       ++NumArrayLenItCounts;
7495       return getConstant(ItCst); // Found terminating iteration!
7496     }
7497   }
7498   return getCouldNotCompute();
7499 }
7500 
7501 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7502     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7503   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7504   if (!RHS)
7505     return getCouldNotCompute();
7506 
7507   const BasicBlock *Latch = L->getLoopLatch();
7508   if (!Latch)
7509     return getCouldNotCompute();
7510 
7511   const BasicBlock *Predecessor = L->getLoopPredecessor();
7512   if (!Predecessor)
7513     return getCouldNotCompute();
7514 
7515   // Return true if V is of the form "LHS `shift_op` <positive constant>".
7516   // Return LHS in OutLHS and the shift opcode in OutOpCode.
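  // For example, `lshr i32 %v, 3` and `shl i32 %v, 1` both match (OutOpCode
  // LShr and Shl respectively), while a shift by zero does not: the amount
  // must be strictly positive for the stabilization argument below to hold.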
7517   auto MatchPositiveShift =
7518       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7519
7520     using namespace PatternMatch;
7521
7522     ConstantInt *ShiftAmt;
7523     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7524       OutOpCode = Instruction::LShr;
7525     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7526       OutOpCode = Instruction::AShr;
7527     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7528       OutOpCode = Instruction::Shl;
7529     else
7530       return false;
7531
7532     return ShiftAmt->getValue().isStrictlyPositive();
7533   };
7534
7535   // Recognize a "shift recurrence", matching either %iv or %iv.shifted in:
7536   //
7537   // loop:
7538   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7539   //   %iv.shifted = lshr i32 %iv, <positive constant>
7540   //
7541   // Return true on a successful match.  Return the corresponding PHI node (%iv
7542   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7543   auto MatchShiftRecurrence =
7544       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7545     Optional<Instruction::BinaryOps> PostShiftOpCode;
7546
7547     {
7548       Instruction::BinaryOps OpC;
7549       Value *V;
7550
7551       // If we encounter a shift instruction, "peel off" the shift operation,
7552       // and remember that we did so.  Later when we inspect %iv's backedge
7553       // value, we will make sure that the backedge value uses the same
7554       // operation.
7555       //
7556       // Note: the peeled shift operation does not have to be the same
7557       // instruction as the one feeding into the PHI's backedge value.  We only
7558       // really care about it being the same *kind* of shift instruction --
7559       // that's all that is required for our later inferences to hold.
7560       if (MatchPositiveShift(LHS, V, OpC)) {
7561         PostShiftOpCode = OpC;
7562         LHS = V;
7563       }
7564     }
7565
7566     PNOut = dyn_cast<PHINode>(LHS);
7567     if (!PNOut || PNOut->getParent() != L->getHeader())
7568       return false;
7569
7570     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7571     Value *OpLHS;
7572
7573     return
7574         // The backedge value for the PHI node must be a shift by a positive
7575         // amount
7576         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7577
7578         // of the PHI node itself
7579         OpLHS == PNOut &&
7580
7581         // and the kind of shift should match the kind of shift we peeled
7582         // off, if any.
7583         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7584   };
7585
7586   PHINode *PN;
7587   Instruction::BinaryOps OpCode;
7588   if (!MatchShiftRecurrence(LHS, PN, OpCode))
7589     return getCouldNotCompute();
7590
7591   const DataLayout &DL = getDataLayout();
7592
7593   // The key rationale for this optimization is that for some kinds of shift
7594   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7595   // within a finite number of iterations.  If the condition guarding the
7596   // backedge (in the sense that the backedge is taken if the condition is true)
7597   // is false for the value the shift recurrence stabilizes to, then we know
7598   // that the backedge is taken only a finite number of times.
7599
7600   ConstantInt *StableValue = nullptr;
7601   switch (OpCode) {
7602   default:
7603     llvm_unreachable("Impossible case!");
7604
7605   case Instruction::AShr: {
7606     // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative, and
7607     // to -1 if K is negative, in at most bitwidth(K) iterations.
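    // For example, {-100,ashr,1} evolves as -100, -50, -25, -13, -7, -4, -2,
    // -1, -1, ... and then stays at -1, while a non-negative start such as
    // {100,ashr,1} reaches 0 within bitwidth(K) steps and stays there.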
7608 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7609 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7610 Predecessor->getTerminator(), &DT); 7611 auto *Ty = cast<IntegerType>(RHS->getType()); 7612 if (Known.isNonNegative()) 7613 StableValue = ConstantInt::get(Ty, 0); 7614 else if (Known.isNegative()) 7615 StableValue = ConstantInt::get(Ty, -1, true); 7616 else 7617 return getCouldNotCompute(); 7618 7619 break; 7620 } 7621 case Instruction::LShr: 7622 case Instruction::Shl: 7623 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7624 // stabilize to 0 in at most bitwidth(K) iterations. 7625 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7626 break; 7627 } 7628 7629 auto *Result = 7630 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7631 assert(Result->getType()->isIntegerTy(1) && 7632 "Otherwise cannot be an operand to a branch instruction"); 7633 7634 if (Result->isZeroValue()) { 7635 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7636 const SCEV *UpperBound = 7637 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7638 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7639 } 7640 7641 return getCouldNotCompute(); 7642 } 7643 7644 /// Return true if we can constant fold an instruction of the specified type, 7645 /// assuming that all operands were constants. 7646 static bool CanConstantFold(const Instruction *I) { 7647 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7648 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7649 isa<LoadInst>(I)) 7650 return true; 7651 7652 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7653 if (const Function *F = CI->getCalledFunction()) 7654 return canConstantFoldCallTo(CI, F); 7655 return false; 7656 } 7657 7658 /// Determine whether this instruction can constant evolve within this loop 7659 /// assuming its operands can all constant evolve. 7660 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7661 // An instruction outside of the loop can't be derived from a loop PHI. 7662 if (!L->contains(I)) return false; 7663 7664 if (isa<PHINode>(I)) { 7665 // We don't currently keep track of the control flow needed to evaluate 7666 // PHIs, so we cannot handle PHIs inside of loops. 7667 return L->getHeader() == I->getParent(); 7668 } 7669 7670 // If we won't be able to constant fold this expression even if the operands 7671 // are constants, bail early. 7672 return CanConstantFold(I); 7673 } 7674 7675 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7676 /// recursing through each instruction operand until reaching a loop header phi. 7677 static PHINode * 7678 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7679 DenseMap<Instruction *, PHINode *> &PHIMap, 7680 unsigned Depth) { 7681 if (Depth > MaxConstantEvolvingDepth) 7682 return nullptr; 7683 7684 // Otherwise, we can evaluate this instruction if all of its operands are 7685 // constant or derived from a PHI node themselves. 7686 PHINode *PHI = nullptr; 7687 for (Value *Op : UseInst->operands()) { 7688 if (isa<Constant>(Op)) continue; 7689 7690 Instruction *OpInst = dyn_cast<Instruction>(Op); 7691 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7692 7693 PHINode *P = dyn_cast<PHINode>(OpInst); 7694 if (!P) 7695 // If this operand is already visited, reuse the prior result. 7696 // We may have P != PHI if this is the deepest point at which the 7697 // inconsistent paths meet. 
7698       P = PHIMap.lookup(OpInst);
7699     if (!P) {
7700       // Recurse and memoize the results, whether a phi is found or not.
7701       // This recursive call invalidates pointers into PHIMap.
7702       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7703       PHIMap[OpInst] = P;
7704     }
7705     if (!P)
7706       return nullptr;  // Not evolving from PHI
7707     if (PHI && PHI != P)
7708       return nullptr;  // Evolving from multiple different PHIs.
7709     PHI = P;
7710   }
7711   // This is an expression evolving from a constant PHI!
7712   return PHI;
7713 }
7714
7715 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7716 /// in the loop that V is derived from.  We allow arbitrary operations along the
7717 /// way, but the operands of an operation must either be constants or a value
7718 /// derived from a constant PHI.  If this expression does not fit with these
7719 /// constraints, return null.
7720 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7721   Instruction *I = dyn_cast<Instruction>(V);
7722   if (!I || !canConstantEvolve(I, L)) return nullptr;
7723
7724   if (PHINode *PN = dyn_cast<PHINode>(I))
7725     return PN;
7726
7727   // Record non-constant instructions contained by the loop.
7728   DenseMap<Instruction *, PHINode *> PHIMap;
7729   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7730 }
7731
7732 /// EvaluateExpression - Given an expression that passes the
7733 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
7734 /// in the loop has the value PHIVal.  If we can't fold this expression for some
7735 /// reason, return null.
7736 static Constant *EvaluateExpression(Value *V, const Loop *L,
7737                                     DenseMap<Instruction *, Constant *> &Vals,
7738                                     const DataLayout &DL,
7739                                     const TargetLibraryInfo *TLI) {
7740   // Convenient constant check, but redundant for recursive calls.
7741   if (Constant *C = dyn_cast<Constant>(V)) return C;
7742   Instruction *I = dyn_cast<Instruction>(V);
7743   if (!I) return nullptr;
7744
7745   if (Constant *C = Vals.lookup(I)) return C;
7746
7747   // An instruction inside the loop depends on a value outside the loop that we
7748   // weren't given a mapping for, or a value such as a call inside the loop.
7749   if (!canConstantEvolve(I, L)) return nullptr;
7750
7751   // An unmapped PHI can be due to a branch or another loop inside this loop,
7752   // or due to this not being the initial iteration through a loop where we
7753   // couldn't compute the evolution of this particular PHI last time.
7754   if (isa<PHINode>(I)) return nullptr;
7755
7756   std::vector<Constant*> Operands(I->getNumOperands());
7757
7758   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
7759     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
7760     if (!Operand) {
7761       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
7762       if (!Operands[i]) return nullptr;
7763       continue;
7764     }
7765     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
7766     Vals[Operand] = C;
7767     if (!C) return nullptr;
7768     Operands[i] = C;
7769   }
7770
7771   if (CmpInst *CI = dyn_cast<CmpInst>(I))
7772     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
7773                                            Operands[1], DL, TLI);
7774   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7775     if (!LI->isVolatile())
7776       return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
7777   }
7778   return ConstantFoldInstOperands(I, Operands, DL, TLI);
7779 }
7780
7781
7782 // If every incoming value to PN except the one for BB is a specific Constant,
7783 // return that, else return nullptr.
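// For example (illustrative), given "phi i32 [ 7, %a ], [ 7, %b ],
// [ %x, %latch ]" and BB == %latch, this returns the ConstantInt 7; if the
// %a and %b values differed, it would return nullptr.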
7784 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7785 Constant *IncomingVal = nullptr; 7786 7787 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7788 if (PN->getIncomingBlock(i) == BB) 7789 continue; 7790 7791 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7792 if (!CurrentVal) 7793 return nullptr; 7794 7795 if (IncomingVal != CurrentVal) { 7796 if (IncomingVal) 7797 return nullptr; 7798 IncomingVal = CurrentVal; 7799 } 7800 } 7801 7802 return IncomingVal; 7803 } 7804 7805 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7806 /// in the header of its containing loop, we know the loop executes a 7807 /// constant number of times, and the PHI node is just a recurrence 7808 /// involving constants, fold it. 7809 Constant * 7810 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7811 const APInt &BEs, 7812 const Loop *L) { 7813 auto I = ConstantEvolutionLoopExitValue.find(PN); 7814 if (I != ConstantEvolutionLoopExitValue.end()) 7815 return I->second; 7816 7817 if (BEs.ugt(MaxBruteForceIterations)) 7818 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7819 7820 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7821 7822 DenseMap<Instruction *, Constant *> CurrentIterVals; 7823 BasicBlock *Header = L->getHeader(); 7824 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7825 7826 BasicBlock *Latch = L->getLoopLatch(); 7827 if (!Latch) 7828 return nullptr; 7829 7830 for (PHINode &PHI : Header->phis()) { 7831 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7832 CurrentIterVals[&PHI] = StartCST; 7833 } 7834 if (!CurrentIterVals.count(PN)) 7835 return RetVal = nullptr; 7836 7837 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7838 7839 // Execute the loop symbolically to determine the exit value. 7840 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7841 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7842 7843 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7844 unsigned IterationNum = 0; 7845 const DataLayout &DL = getDataLayout(); 7846 for (; ; ++IterationNum) { 7847 if (IterationNum == NumIterations) 7848 return RetVal = CurrentIterVals[PN]; // Got exit value! 7849 7850 // Compute the value of the PHIs for the next iteration. 7851 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7852 DenseMap<Instruction *, Constant *> NextIterVals; 7853 Constant *NextPHI = 7854 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7855 if (!NextPHI) 7856 return nullptr; // Couldn't evaluate! 7857 NextIterVals[PN] = NextPHI; 7858 7859 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7860 7861 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7862 // cease to be able to evaluate one of them or if they stop evolving, 7863 // because that doesn't necessarily prevent us from computing PN. 7864 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7865 for (const auto &I : CurrentIterVals) { 7866 PHINode *PHI = dyn_cast<PHINode>(I.first); 7867 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7868 PHIsToCompute.emplace_back(PHI, I.second); 7869 } 7870 // We use two distinct loops because EvaluateExpression may invalidate any 7871 // iterators into CurrentIterVals. 
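    // (EvaluateExpression caches each operand's value via Vals[Operand] = C,
    // and a DenseMap insertion may rehash and invalidate iterators into it.)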
7872     for (const auto &I : PHIsToCompute) {
7873       PHINode *PHI = I.first;
7874       Constant *&NextPHI = NextIterVals[PHI];
7875       if (!NextPHI) {   // Not already computed.
7876         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7877         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7878       }
7879       if (NextPHI != I.second)
7880         StoppedEvolving = false;
7881     }
7882
7883     // If all entries in CurrentIterVals == NextIterVals then we can stop
7884     // iterating, the loop can't continue to change.
7885     if (StoppedEvolving)
7886       return RetVal = CurrentIterVals[PN];
7887
7888     CurrentIterVals.swap(NextIterVals);
7889   }
7890 }
7891
7892 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
7893                                                           Value *Cond,
7894                                                           bool ExitWhen) {
7895   PHINode *PN = getConstantEvolvingPHI(Cond, L);
7896   if (!PN) return getCouldNotCompute();
7897
7898   // If the loop is canonicalized, the PHI will have exactly two entries.
7899   // That's the only form we support here.
7900   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
7901
7902   DenseMap<Instruction *, Constant *> CurrentIterVals;
7903   BasicBlock *Header = L->getHeader();
7904   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
7905
7906   BasicBlock *Latch = L->getLoopLatch();
7907   assert(Latch && "Should follow from NumIncomingValues == 2!");
7908
7909   for (PHINode &PHI : Header->phis()) {
7910     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
7911       CurrentIterVals[&PHI] = StartCST;
7912   }
7913   if (!CurrentIterVals.count(PN))
7914     return getCouldNotCompute();
7915
7916   // Okay, we found a PHI node that defines the trip count of this loop.
7917   // Execute the loop symbolically to determine when the condition gets a
7918   // value of "ExitWhen".
7919   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
7920   const DataLayout &DL = getDataLayout();
7921   for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
7922     auto *CondVal = dyn_cast_or_null<ConstantInt>(
7923         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
7924
7925     // Couldn't symbolically evaluate.
7926     if (!CondVal) return getCouldNotCompute();
7927
7928     if (CondVal->getValue() == uint64_t(ExitWhen)) {
7929       ++NumBruteForceTripCountsComputed;
7930       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
7931     }
7932
7933     // Update all the PHI nodes for the next iteration.
7934     DenseMap<Instruction *, Constant *> NextIterVals;
7935
7936     // Create a list of which PHIs we need to compute.  We want to do this
7937     // before calling EvaluateExpression on them because that may invalidate
7938     // iterators into CurrentIterVals.
7939     SmallVector<PHINode *, 8> PHIsToCompute;
7940     for (const auto &I : CurrentIterVals) {
7941       PHINode *PHI = dyn_cast<PHINode>(I.first);
7942       if (!PHI || PHI->getParent() != Header) continue;
7943       PHIsToCompute.push_back(PHI);
7944     }
7945     for (PHINode *PHI : PHIsToCompute) {
7946       Constant *&NextPHI = NextIterVals[PHI];
7947       if (NextPHI) continue;    // Already computed!
7948
7949       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7950       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7951     }
7952     CurrentIterVals.swap(NextIterVals);
7953   }
7954
7955   // Too many iterations were needed to evaluate.
7956 return getCouldNotCompute(); 7957 } 7958 7959 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7960 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7961 ValuesAtScopes[V]; 7962 // Check to see if we've folded this expression at this loop before. 7963 for (auto &LS : Values) 7964 if (LS.first == L) 7965 return LS.second ? LS.second : V; 7966 7967 Values.emplace_back(L, nullptr); 7968 7969 // Otherwise compute it. 7970 const SCEV *C = computeSCEVAtScope(V, L); 7971 for (auto &LS : reverse(ValuesAtScopes[V])) 7972 if (LS.first == L) { 7973 LS.second = C; 7974 break; 7975 } 7976 return C; 7977 } 7978 7979 /// This builds up a Constant using the ConstantExpr interface. That way, we 7980 /// will return Constants for objects which aren't represented by a 7981 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7982 /// Returns NULL if the SCEV isn't representable as a Constant. 7983 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7984 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7985 case scCouldNotCompute: 7986 case scAddRecExpr: 7987 break; 7988 case scConstant: 7989 return cast<SCEVConstant>(V)->getValue(); 7990 case scUnknown: 7991 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7992 case scSignExtend: { 7993 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7994 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7995 return ConstantExpr::getSExt(CastOp, SS->getType()); 7996 break; 7997 } 7998 case scZeroExtend: { 7999 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8000 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8001 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8002 break; 8003 } 8004 case scTruncate: { 8005 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8006 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8007 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8008 break; 8009 } 8010 case scAddExpr: { 8011 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8012 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8013 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8014 unsigned AS = PTy->getAddressSpace(); 8015 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8016 C = ConstantExpr::getBitCast(C, DestPtrTy); 8017 } 8018 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8019 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8020 if (!C2) return nullptr; 8021 8022 // First pointer! 8023 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8024 unsigned AS = C2->getType()->getPointerAddressSpace(); 8025 std::swap(C, C2); 8026 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8027 // The offsets have been converted to bytes. We can add bytes to an 8028 // i8* by GEP with the byte count in the first index. 8029 C = ConstantExpr::getBitCast(C, DestPtrTy); 8030 } 8031 8032 // Don't bother trying to sum two pointers. We probably can't 8033 // statically compute a load that results from it anyway. 
8034         if (C2->getType()->isPointerTy())
8035           return nullptr;
8036
8037         if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8038           if (PTy->getElementType()->isStructTy())
8039             C2 = ConstantExpr::getIntegerCast(
8040                 C2, Type::getInt32Ty(C->getContext()), true);
8041           C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8042         } else
8043           C = ConstantExpr::getAdd(C, C2);
8044       }
8045       return C;
8046     }
8047     break;
8048   }
8049   case scMulExpr: {
8050     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8051     if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8052       // Don't bother with pointers at all.
8053       if (C->getType()->isPointerTy()) return nullptr;
8054       for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8055         Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8056         if (!C2 || C2->getType()->isPointerTy()) return nullptr;
8057         C = ConstantExpr::getMul(C, C2);
8058       }
8059       return C;
8060     }
8061     break;
8062   }
8063   case scUDivExpr: {
8064     const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8065     if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8066       if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8067         if (LHS->getType() == RHS->getType())
8068           return ConstantExpr::getUDiv(LHS, RHS);
8069     break;
8070   }
8071   case scSMaxExpr:
8072   case scUMaxExpr:
8073     break; // TODO: smax, umax.
8074   }
8075   return nullptr;
8076 }
8077
8078 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8079   if (isa<SCEVConstant>(V)) return V;
8080
8081   // If this instruction is evolved from a constant-evolving PHI, compute the
8082   // exit value from the loop without using SCEVs.
8083   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8084     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8085       const Loop *LI = this->LI[I->getParent()];
8086       if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
8087         if (PHINode *PN = dyn_cast<PHINode>(I))
8088           if (PN->getParent() == LI->getHeader()) {
8089             // Okay, there is no closed form solution for the PHI node.  Check
8090             // to see if the loop that contains it has a known backedge-taken
8091             // count.  If so, we may be able to force computation of the exit
8092             // value.
8093             const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
8094             if (const SCEVConstant *BTCC =
8095                     dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8096
8097               // This trivial case can show up in some degenerate cases where
8098               // the incoming IR has not yet been fully simplified.
8099               if (BTCC->getValue()->isZero()) {
8100                 Value *InitValue = nullptr;
8101                 bool MultipleInitValues = false;
8102                 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8103                   if (!LI->contains(PN->getIncomingBlock(i))) {
8104                     if (!InitValue)
8105                       InitValue = PN->getIncomingValue(i);
8106                     else if (InitValue != PN->getIncomingValue(i)) {
8107                       MultipleInitValues = true;
8108                       break;
8109                     }
8110                   }
8111                 }
8112                 if (!MultipleInitValues && InitValue)
8113                   return getSCEV(InitValue);
8114               }
8115               // Okay, we know how many times the containing loop executes.  If
8116               // this is a constant evolving PHI node, get the final value at
8117               // the specified iteration number.
8118               Constant *RV =
8119                   getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
8120               if (RV) return getSCEV(RV);
8121             }
8122           }
8123
8124       // Okay, this is an expression that we cannot symbolically evaluate
8125       // into a SCEV.  Check to see if it's possible to symbolically evaluate
8126       // the arguments into constants, and if so, try to constant propagate the
8127       // result.
      // This is particularly useful for computing loop exit values.
8128       if (CanConstantFold(I)) {
8129         SmallVector<Constant *, 4> Operands;
8130         bool MadeImprovement = false;
8131         for (Value *Op : I->operands()) {
8132           if (Constant *C = dyn_cast<Constant>(Op)) {
8133             Operands.push_back(C);
8134             continue;
8135           }
8136
8137           // If the operand is non-constant and of a non-SCEVable type (not
8138           // an integer or pointer), don't even try to analyze it with SCEV
8139           // techniques.
8140           if (!isSCEVable(Op->getType()))
8141             return V;
8142
8143           const SCEV *OrigV = getSCEV(Op);
8144           const SCEV *OpV = getSCEVAtScope(OrigV, L);
8145           MadeImprovement |= OrigV != OpV;
8146
8147           Constant *C = BuildConstantFromSCEV(OpV);
8148           if (!C) return V;
8149           if (C->getType() != Op->getType())
8150             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8151                                                               Op->getType(),
8152                                                               false),
8153                                       C, Op->getType());
8154           Operands.push_back(C);
8155         }
8156
8157         // Check to see if getSCEVAtScope actually made an improvement.
8158         if (MadeImprovement) {
8159           Constant *C = nullptr;
8160           const DataLayout &DL = getDataLayout();
8161           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8162             C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8163                                                 Operands[1], DL, &TLI);
8164           else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
8165             if (!LI->isVolatile())
8166               C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8167           } else
8168             C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8169           if (!C) return V;
8170           return getSCEV(C);
8171         }
8172       }
8173     }
8174
8175     // This is some other type of SCEVUnknown, just return it.
8176     return V;
8177   }
8178
8179   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8180     // Avoid performing the look-up in the common case where the specified
8181     // expression has no loop-variant portions.
8182     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8183       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8184       if (OpAtScope != Comm->getOperand(i)) {
8185         // Okay, at least one of these operands is loop variant but might be
8186         // foldable.  Build a new instance of the folded commutative expression.
8187         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
8188                                             Comm->op_begin()+i);
8189         NewOps.push_back(OpAtScope);
8190
8191         for (++i; i != e; ++i) {
8192           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8193           NewOps.push_back(OpAtScope);
8194         }
8195         if (isa<SCEVAddExpr>(Comm))
8196           return getAddExpr(NewOps);
8197         if (isa<SCEVMulExpr>(Comm))
8198           return getMulExpr(NewOps);
8199         if (isa<SCEVSMaxExpr>(Comm))
8200           return getSMaxExpr(NewOps);
8201         if (isa<SCEVUMaxExpr>(Comm))
8202           return getUMaxExpr(NewOps);
8203         llvm_unreachable("Unknown commutative SCEV type!");
8204       }
8205     }
8206     // If we got here, all operands are loop invariant.
8207     return Comm;
8208   }
8209
8210   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8211     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8212     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8213     if (LHS == Div->getLHS() && RHS == Div->getRHS())
8214       return Div; // must be loop invariant
8215     return getUDivExpr(LHS, RHS);
8216   }
8217
8218   // If this is a loop recurrence for a loop that does not contain L, then we
8219   // are dealing with the final value computed by the loop.
8220   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
8221     // First, attempt to evaluate each operand.
8222 // Avoid performing the look-up in the common case where the specified 8223 // expression has no loop-variant portions. 8224 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8225 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8226 if (OpAtScope == AddRec->getOperand(i)) 8227 continue; 8228 8229 // Okay, at least one of these operands is loop variant but might be 8230 // foldable. Build a new instance of the folded commutative expression. 8231 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8232 AddRec->op_begin()+i); 8233 NewOps.push_back(OpAtScope); 8234 for (++i; i != e; ++i) 8235 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8236 8237 const SCEV *FoldedRec = 8238 getAddRecExpr(NewOps, AddRec->getLoop(), 8239 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8240 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8241 // The addrec may be folded to a nonrecurrence, for example, if the 8242 // induction variable is multiplied by zero after constant folding. Go 8243 // ahead and return the folded value. 8244 if (!AddRec) 8245 return FoldedRec; 8246 break; 8247 } 8248 8249 // If the scope is outside the addrec's loop, evaluate it by using the 8250 // loop exit value of the addrec. 8251 if (!AddRec->getLoop()->contains(L)) { 8252 // To evaluate this recurrence, we need to know how many times the AddRec 8253 // loop iterates. Compute this now. 8254 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8255 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8256 8257 // Then, evaluate the AddRec. 8258 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8259 } 8260 8261 return AddRec; 8262 } 8263 8264 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8265 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8266 if (Op == Cast->getOperand()) 8267 return Cast; // must be loop invariant 8268 return getZeroExtendExpr(Op, Cast->getType()); 8269 } 8270 8271 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8272 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8273 if (Op == Cast->getOperand()) 8274 return Cast; // must be loop invariant 8275 return getSignExtendExpr(Op, Cast->getType()); 8276 } 8277 8278 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8279 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8280 if (Op == Cast->getOperand()) 8281 return Cast; // must be loop invariant 8282 return getTruncateExpr(Op, Cast->getType()); 8283 } 8284 8285 llvm_unreachable("Unknown SCEV type!"); 8286 } 8287 8288 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8289 return getSCEVAtScope(getSCEV(V), L); 8290 } 8291 8292 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8293 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8294 return stripInjectiveFunctions(ZExt->getOperand()); 8295 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8296 return stripInjectiveFunctions(SExt->getOperand()); 8297 return S; 8298 } 8299 8300 /// Finds the minimum unsigned root of the following equation: 8301 /// 8302 /// A * X = B (mod N) 8303 /// 8304 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8305 /// A and B isn't important. 8306 /// 8307 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 
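///
/// For example, with BW = 4 (so N = 16), consider 4 * X = 12 (mod 16):
/// D = gcd(4, 16) = 4 divides 12, so a solution exists.  The inverse
/// I = (4/4)^{-1} (mod 16/4) = 1, and X = (1 * 12 mod 16) / 4 = 3.  The full
/// solution set is {3, 7, 11, 15}; 3 is the minimum unsigned root.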
8308 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8309 ScalarEvolution &SE) { 8310 uint32_t BW = A.getBitWidth(); 8311 assert(BW == SE.getTypeSizeInBits(B->getType())); 8312 assert(A != 0 && "A must be non-zero."); 8313 8314 // 1. D = gcd(A, N) 8315 // 8316 // The gcd of A and N may have only one prime factor: 2. The number of 8317 // trailing zeros in A is its multiplicity 8318 uint32_t Mult2 = A.countTrailingZeros(); 8319 // D = 2^Mult2 8320 8321 // 2. Check if B is divisible by D. 8322 // 8323 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8324 // is not less than multiplicity of this prime factor for D. 8325 if (SE.GetMinTrailingZeros(B) < Mult2) 8326 return SE.getCouldNotCompute(); 8327 8328 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8329 // modulo (N / D). 8330 // 8331 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8332 // (N / D) in general. The inverse itself always fits into BW bits, though, 8333 // so we immediately truncate it. 8334 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8335 APInt Mod(BW + 1, 0); 8336 Mod.setBit(BW - Mult2); // Mod = N / D 8337 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8338 8339 // 4. Compute the minimum unsigned root of the equation: 8340 // I * (B / D) mod (N / D) 8341 // To simplify the computation, we factor out the divide by D: 8342 // (I * B mod N) / D 8343 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8344 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8345 } 8346 8347 /// For a given quadratic addrec, generate coefficients of the corresponding 8348 /// quadratic equation, multiplied by a common value to ensure that they are 8349 /// integers. 8350 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8351 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8352 /// were multiplied by, and BitWidth is the bit width of the original addrec 8353 /// coefficients. 8354 /// This function returns None if the addrec coefficients are not compile- 8355 /// time constants. 8356 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8357 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8358 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8359 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8360 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8361 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8362 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8363 << *AddRec << '\n'); 8364 8365 // We currently can only solve this if the coefficients are constants. 8366 if (!LC || !MC || !NC) { 8367 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8368 return None; 8369 } 8370 8371 APInt L = LC->getAPInt(); 8372 APInt M = MC->getAPInt(); 8373 APInt N = NC->getAPInt(); 8374 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8375 8376 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8377 unsigned NewWidth = BitWidth + 1; 8378 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8379 << BitWidth << '\n'); 8380 // The sign-extension (as opposed to a zero-extension) here matches the 8381 // extension used in SolveQuadraticEquationWrap (with the same motivation). 
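  // For example, an i8 coefficient -3 (0xFD) sign-extends to the i9 value -3
  // (0x1FD); a zero-extension would instead produce 253 and change the
  // quadratic's solutions.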
8382 N = N.sext(NewWidth); 8383 M = M.sext(NewWidth); 8384 L = L.sext(NewWidth); 8385 8386 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8387 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8388 // L+M, L+2M+N, L+3M+3N, ... 8389 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8390 // 8391 // The equation Acc = 0 is then 8392 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8393 // In a quadratic form it becomes: 8394 // N n^2 + (2M-N) n + 2L = 0. 8395 8396 APInt A = N; 8397 APInt B = 2 * M - A; 8398 APInt C = 2 * L; 8399 APInt T = APInt(NewWidth, 2); 8400 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8401 << "x + " << C << ", coeff bw: " << NewWidth 8402 << ", multiplied by " << T << '\n'); 8403 return std::make_tuple(A, B, C, T, BitWidth); 8404 } 8405 8406 /// Helper function to compare optional APInts: 8407 /// (a) if X and Y both exist, return min(X, Y), 8408 /// (b) if neither X nor Y exist, return None, 8409 /// (c) if exactly one of X and Y exists, return that value. 8410 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8411 if (X.hasValue() && Y.hasValue()) { 8412 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8413 APInt XW = X->sextOrSelf(W); 8414 APInt YW = Y->sextOrSelf(W); 8415 return XW.slt(YW) ? *X : *Y; 8416 } 8417 if (!X.hasValue() && !Y.hasValue()) 8418 return None; 8419 return X.hasValue() ? *X : *Y; 8420 } 8421 8422 /// Helper function to truncate an optional APInt to a given BitWidth. 8423 /// When solving addrec-related equations, it is preferable to return a value 8424 /// that has the same bit width as the original addrec's coefficients. If the 8425 /// solution fits in the original bit width, truncate it (except for i1). 8426 /// Returning a value of a different bit width may inhibit some optimizations. 8427 /// 8428 /// In general, a solution to a quadratic equation generated from an addrec 8429 /// may require BW+1 bits, where BW is the bit width of the addrec's 8430 /// coefficients. The reason is that the coefficients of the quadratic 8431 /// equation are BW+1 bits wide (to avoid truncation when converting from 8432 /// the addrec to the equation). 8433 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8434 if (!X.hasValue()) 8435 return None; 8436 unsigned W = X->getBitWidth(); 8437 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8438 return X->trunc(BitWidth); 8439 return X; 8440 } 8441 8442 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8443 /// iterations. The values L, M, N are assumed to be signed, and they 8444 /// should all have the same bit widths. 8445 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8446 /// where BW is the bit width of the addrec's coefficients. 8447 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8448 /// returned as such, otherwise the bit width of the returned value may 8449 /// be greater than BW. 8450 /// 8451 /// This function returns None if 8452 /// (a) the addrec coefficients are not constant, or 8453 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8454 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8455 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 
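///
/// For example, for the chrec {-6,+,2,+,2}, c(n) = -6 + 2n + n(n-1) =
/// n^2 + n - 6 = (n + 3)(n - 2), so the least n >= 0 with c(n) == 0 is n = 2.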
8456 static Optional<APInt> 8457 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8458 APInt A, B, C, M; 8459 unsigned BitWidth; 8460 auto T = GetQuadraticEquation(AddRec); 8461 if (!T.hasValue()) 8462 return None; 8463 8464 std::tie(A, B, C, M, BitWidth) = *T; 8465 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8466 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8467 if (!X.hasValue()) 8468 return None; 8469 8470 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8471 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8472 if (!V->isZero()) 8473 return None; 8474 8475 return TruncIfPossible(X, BitWidth); 8476 } 8477 8478 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8479 /// iterations. The values M, N are assumed to be signed, and they 8480 /// should all have the same bit widths. 8481 /// Find the least n such that c(n) does not belong to the given range, 8482 /// while c(n-1) does. 8483 /// 8484 /// This function returns None if 8485 /// (a) the addrec coefficients are not constant, or 8486 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8487 /// bounds of the range. 8488 static Optional<APInt> 8489 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8490 const ConstantRange &Range, ScalarEvolution &SE) { 8491 assert(AddRec->getOperand(0)->isZero() && 8492 "Starting value of addrec should be 0"); 8493 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8494 << Range << ", addrec " << *AddRec << '\n'); 8495 // This case is handled in getNumIterationsInRange. Here we can assume that 8496 // we start in the range. 8497 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8498 "Addrec's initial value should be in range"); 8499 8500 APInt A, B, C, M; 8501 unsigned BitWidth; 8502 auto T = GetQuadraticEquation(AddRec); 8503 if (!T.hasValue()) 8504 return None; 8505 8506 // Be careful about the return value: there can be two reasons for not 8507 // returning an actual number. First, if no solutions to the equations 8508 // were found, and second, if the solutions don't leave the given range. 8509 // The first case means that the actual solution is "unknown", the second 8510 // means that it's known, but not valid. If the solution is unknown, we 8511 // cannot make any conclusions. 8512 // Return a pair: the optional solution and a flag indicating if the 8513 // solution was found. 8514 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8515 // Solve for signed overflow and unsigned overflow, pick the lower 8516 // solution. 8517 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8518 << Bound << " (before multiplying by " << M << ")\n"); 8519 Bound *= M; // The quadratic equation multiplier. 
8520
8521     Optional<APInt> SO = None;
8522     if (BitWidth > 1) {
8523       LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8524                            "signed overflow\n");
8525       SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
8526     }
8527     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8528                          "unsigned overflow\n");
8529     Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
8530                                                               BitWidth+1);
8531
8532     auto LeavesRange = [&] (const APInt &X) {
8533       ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
8534       ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
8535       if (Range.contains(V0->getValue()))
8536         return false;
8537       // X should be at least 1, so X-1 is non-negative.
8538       ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
8539       ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
8540       if (Range.contains(V1->getValue()))
8541         return true;
8542       return false;
8543     };
8544
8545     // If SolveQuadraticEquationWrap returns None, it means that there can
8546     // be a solution, but the function failed to find it. We cannot treat it
8547     // as "no solution".
8548     if (!SO.hasValue() || !UO.hasValue())
8549       return { None, false };
8550
8551     // Check the smaller value first to see if it leaves the range.
8552     // At this point, both SO and UO must have values.
8553     Optional<APInt> Min = MinOptional(SO, UO);
8554     if (LeavesRange(*Min))
8555       return { Min, true };
8556     Optional<APInt> Max = Min == SO ? UO : SO;
8557     if (LeavesRange(*Max))
8558       return { Max, true };
8559
8560     // Solutions were found, but were eliminated, hence the "true".
8561     return { None, true };
8562   };
8563
8564   std::tie(A, B, C, M, BitWidth) = *T;
8565   // Lower bound is inclusive, subtract 1 to represent the exiting value.
8566   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8567   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8568   auto SL = SolveForBoundary(Lower);
8569   auto SU = SolveForBoundary(Upper);
8570   // If any of the solutions was unknown, no meaningful conclusions can
8571   // be made.
8572   if (!SL.second || !SU.second)
8573     return None;
8574
8575   // Claim: The correct solution is not some value between Min and Max.
8576   //
8577   // Justification: Assuming that Min and Max are different values, one of
8578   // them is when the first signed overflow happens, the other is when the
8579   // first unsigned overflow happens. Crossing the range boundary is only
8580   // possible via an overflow (treating 0 as a special case of it, modeling
8581   // an overflow as crossing k*2^W for some k).
8582   //
8583   // The interesting case here is when Min was eliminated as an invalid
8584   // solution, but Max was not. The argument is that if there was another
8585   // overflow between Min and Max, it would also have been eliminated if
8586   // it was considered.
8587   //
8588   // For a given boundary, it is possible to have two overflows of the same
8589   // type (signed/unsigned) without having the other type in between: this
8590   // can happen when the vertex of the parabola is between the iterations
8591   // corresponding to the overflows. This is only possible when the two
8592   // overflows cross k*2^W for the same k. In such case, if the second one
8593   // left the range (and was the first one to do so), the first overflow
8594   // would have to enter the range, which would mean that either we had left
8595   // the range before or that we started outside of it. Both of these cases
8596   // are contradictions.
8597   //
8598   // Claim: In the case where SolveForBoundary returns None, the correct
8599   // solution is not some value between the Max for this boundary and the
8600   // Min of the other boundary.
8601   //
8602   // Justification: Assume that we had such Max_A and Min_B corresponding
8603   // to range boundaries A and B and such that Max_A < Min_B. If there was
8604   // a solution between Max_A and Min_B, it would have to be caused by an
8605   // overflow corresponding to either A or B. It cannot correspond to B,
8606   // since Min_B is the first occurrence of such an overflow. If it
8607   // corresponded to A, it would have to be either a signed or an unsigned
8608   // overflow that is larger than both eliminated overflows for A. But
8609   // between the eliminated overflows and this overflow, the values would
8610   // cover the entire value space, thus crossing the other boundary, which
8611   // is a contradiction.
8612
8613   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
8614 }
8615
8616 ScalarEvolution::ExitLimit
8617 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
8618                               bool AllowPredicates) {
8619
8620   // This is only used for loops with a "x != y" exit test.  The exit
8621   // condition is now expressed as a single expression, V = x-y.  So the exit
8622   // test is effectively V != 0.  We know, and take advantage of, the fact
8623   // that this expression is only used in a comparison-with-zero context.
8624
8625   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8626   // If the value is a constant
8627   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
8628     // If the value is already zero, the branch will execute zero times.
8629     if (C->getValue()->isZero()) return C;
8630     return getCouldNotCompute();  // Otherwise it will loop infinitely.
8631   }
8632
8633   const SCEVAddRecExpr *AddRec =
8634       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
8635
8636   if (!AddRec && AllowPredicates)
8637     // Try to make this an AddRec using runtime tests, in the first X
8638     // iterations of this loop, where X is the SCEV expression found by the
8639     // algorithm below.
8640     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
8641
8642   if (!AddRec || AddRec->getLoop() != L)
8643     return getCouldNotCompute();
8644
8645   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
8646   // the quadratic equation to solve it.
8647   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
8648     // We can only use this value if the chrec ends up with an exact zero
8649     // value at this index.  When solving for "X*X != 5", for example, we
8650     // should not accept a root of 2.
8651     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
8652       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
8653       return ExitLimit(R, R, false, Predicates);
8654     }
8655     return getCouldNotCompute();
8656   }
8657
8658   // Otherwise we can only handle this if it is affine.
8659   if (!AddRec->isAffine())
8660     return getCouldNotCompute();
8661
8662   // If this is an affine expression, the execution count of this branch is
8663   // the minimum unsigned root of the following equation:
8664   //
8665   //     Start + Step*N = 0 (mod 2^BW)
8666   //
8667   // equivalent to:
8668   //
8669   //     Step*N = -Start (mod 2^BW)
8670   //
8671   // where BW is the common bit width of Start and Step.
8672
8673   // Get the initial value for the loop.
8674 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8675 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8676 8677 // For now we handle only constant steps. 8678 // 8679 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8680 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8681 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8682 // We have not yet seen any such cases. 8683 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8684 if (!StepC || StepC->getValue()->isZero()) 8685 return getCouldNotCompute(); 8686 8687 // For positive steps (counting up until unsigned overflow): 8688 // N = -Start/Step (as unsigned) 8689 // For negative steps (counting down to zero): 8690 // N = Start/-Step 8691 // First compute the unsigned distance from zero in the direction of Step. 8692 bool CountDown = StepC->getAPInt().isNegative(); 8693 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8694 8695 // Handle unitary steps, which cannot wraparound. 8696 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8697 // N = Distance (as unsigned) 8698 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8699 APInt MaxBECount = getUnsignedRangeMax(Distance); 8700 8701 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8702 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8703 // case, and see if we can improve the bound. 8704 // 8705 // Explicitly handling this here is necessary because getUnsignedRange 8706 // isn't context-sensitive; it doesn't know that we only care about the 8707 // range inside the loop. 8708 const SCEV *Zero = getZero(Distance->getType()); 8709 const SCEV *One = getOne(Distance->getType()); 8710 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8711 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8712 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8713 // as "unsigned_max(Distance + 1) - 1". 8714 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8715 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8716 } 8717 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8718 } 8719 8720 // If the condition controls loop exit (the loop exits only if the expression 8721 // is true) and the addition is no-wrap we can use unsigned divide to 8722 // compute the backedge count. In this case, the step may not divide the 8723 // distance, but we don't care because if the condition is "missed" the loop 8724 // will have undefined behavior due to wrapping. 8725 if (ControlsExit && AddRec->hasNoSelfWrap() && 8726 loopHasNoAbnormalExits(AddRec->getLoop())) { 8727 const SCEV *Exact = 8728 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8729 const SCEV *Max = 8730 Exact == getCouldNotCompute() 8731 ? Exact 8732 : getConstant(getUnsignedRangeMax(Exact)); 8733 return ExitLimit(Exact, Max, false, Predicates); 8734 } 8735 8736 // Solve the general equation. 8737 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8738 getNegativeSCEV(Start), *this); 8739 const SCEV *M = E == getCouldNotCompute() 8740 ? E 8741 : getConstant(getUnsignedRangeMax(E)); 8742 return ExitLimit(E, M, false, Predicates); 8743 } 8744 8745 ScalarEvolution::ExitLimit 8746 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8747 // Loops that look like: while (X == 0) are very strange indeed. 
8748   // We don't handle them yet except for the trivial case.  This could be
8749   // expanded in the future as needed.
8750
8751   // If the value is a constant, check to see if it is known to be non-zero
8752   // already.  If so, the backedge will execute zero times.
8753   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
8754     if (!C->getValue()->isZero())
8755       return getZero(C->getType());
8756     return getCouldNotCompute();  // Otherwise it will loop infinitely.
8757   }
8758
8759   // We could implement others, but I really doubt anyone writes loops like
8760   // this, and if they did, they would already be constant folded.
8761   return getCouldNotCompute();
8762 }
8763
8764 std::pair<BasicBlock *, BasicBlock *>
8765 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
8766   // If the block has a unique predecessor, then there is no path from the
8767   // predecessor to the block that does not go through the direct edge
8768   // from the predecessor to the block.
8769   if (BasicBlock *Pred = BB->getSinglePredecessor())
8770     return {Pred, BB};
8771
8772   // A loop's header is defined to be a block that dominates the loop.
8773   // If the header has a unique predecessor outside the loop, it must be
8774   // a block that has exactly one successor that can reach the loop.
8775   if (Loop *L = LI.getLoopFor(BB))
8776     return {L->getLoopPredecessor(), L->getHeader()};
8777
8778   return {nullptr, nullptr};
8779 }
8780
8781 /// SCEV structural equivalence is usually sufficient for testing whether two
8782 /// expressions are equal, however for the purposes of looking for a condition
8783 /// guarding a loop, it can be useful to be a little more general, since a
8784 /// front-end may have replicated the controlling expression.
8785 static bool HasSameValue(const SCEV *A, const SCEV *B) {
8786   // Quick check to see if they are the same SCEV.
8787   if (A == B) return true;
8788
8789   auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
8790     // Not all instructions that are "identical" compute the same value.  For
8791     // instance, two distinct alloca instructions allocating the same type are
8792     // identical and do not read memory; but compute distinct values.
8793     return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
8794   };
8795
8796   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
8797   // two different instructions with the same value.  Check for this case.
8798   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
8799     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
8800       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
8801         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
8802           if (ComputesEqualValues(AI, BI))
8803             return true;
8804
8805   // Otherwise assume they may have a different value.
8806   return false;
8807 }
8808
8809 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
8810                                            const SCEV *&LHS, const SCEV *&RHS,
8811                                            unsigned Depth) {
8812   bool Changed = false;
8813
8814   // If we hit the max recursion limit bail out.
8815   if (Depth >= 3)
8816     return false;
8817
8818   // Canonicalize a constant to the right side.
8819   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
8820     // Check for both operands constant.
8821 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8822 if (ConstantExpr::getICmp(Pred, 8823 LHSC->getValue(), 8824 RHSC->getValue())->isNullValue()) 8825 goto trivially_false; 8826 else 8827 goto trivially_true; 8828 } 8829 // Otherwise swap the operands to put the constant on the right. 8830 std::swap(LHS, RHS); 8831 Pred = ICmpInst::getSwappedPredicate(Pred); 8832 Changed = true; 8833 } 8834 8835 // If we're comparing an addrec with a value which is loop-invariant in the 8836 // addrec's loop, put the addrec on the left. Also make a dominance check, 8837 // as both operands could be addrecs loop-invariant in each other's loop. 8838 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8839 const Loop *L = AR->getLoop(); 8840 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8841 std::swap(LHS, RHS); 8842 Pred = ICmpInst::getSwappedPredicate(Pred); 8843 Changed = true; 8844 } 8845 } 8846 8847 // If there's a constant operand, canonicalize comparisons with boundary 8848 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8849 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8850 const APInt &RA = RC->getAPInt(); 8851 8852 bool SimplifiedByConstantRange = false; 8853 8854 if (!ICmpInst::isEquality(Pred)) { 8855 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8856 if (ExactCR.isFullSet()) 8857 goto trivially_true; 8858 else if (ExactCR.isEmptySet()) 8859 goto trivially_false; 8860 8861 APInt NewRHS; 8862 CmpInst::Predicate NewPred; 8863 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8864 ICmpInst::isEquality(NewPred)) { 8865 // We were able to convert an inequality to an equality. 8866 Pred = NewPred; 8867 RHS = getConstant(NewRHS); 8868 Changed = SimplifiedByConstantRange = true; 8869 } 8870 } 8871 8872 if (!SimplifiedByConstantRange) { 8873 switch (Pred) { 8874 default: 8875 break; 8876 case ICmpInst::ICMP_EQ: 8877 case ICmpInst::ICMP_NE: 8878 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8879 if (!RA) 8880 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8881 if (const SCEVMulExpr *ME = 8882 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8883 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8884 ME->getOperand(0)->isAllOnesValue()) { 8885 RHS = AE->getOperand(1); 8886 LHS = ME->getOperand(1); 8887 Changed = true; 8888 } 8889 break; 8890 8891 8892 // The "Should have been caught earlier!" messages refer to the fact 8893 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8894 // should have fired on the corresponding cases, and canonicalized the 8895 // check to trivially_true or trivially_false. 
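      //
      // For example, (X uge 0) has a full-set exact range and is folded to
      // trivially_true above, so the ICMP_UGE case below can assume RA != 0.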
8896 8897 case ICmpInst::ICMP_UGE: 8898 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8899 Pred = ICmpInst::ICMP_UGT; 8900 RHS = getConstant(RA - 1); 8901 Changed = true; 8902 break; 8903 case ICmpInst::ICMP_ULE: 8904 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8905 Pred = ICmpInst::ICMP_ULT; 8906 RHS = getConstant(RA + 1); 8907 Changed = true; 8908 break; 8909 case ICmpInst::ICMP_SGE: 8910 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8911 Pred = ICmpInst::ICMP_SGT; 8912 RHS = getConstant(RA - 1); 8913 Changed = true; 8914 break; 8915 case ICmpInst::ICMP_SLE: 8916 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8917 Pred = ICmpInst::ICMP_SLT; 8918 RHS = getConstant(RA + 1); 8919 Changed = true; 8920 break; 8921 } 8922 } 8923 } 8924 8925 // Check for obvious equality. 8926 if (HasSameValue(LHS, RHS)) { 8927 if (ICmpInst::isTrueWhenEqual(Pred)) 8928 goto trivially_true; 8929 if (ICmpInst::isFalseWhenEqual(Pred)) 8930 goto trivially_false; 8931 } 8932 8933 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8934 // adding or subtracting 1 from one of the operands. 8935 switch (Pred) { 8936 case ICmpInst::ICMP_SLE: 8937 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8938 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8939 SCEV::FlagNSW); 8940 Pred = ICmpInst::ICMP_SLT; 8941 Changed = true; 8942 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8943 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8944 SCEV::FlagNSW); 8945 Pred = ICmpInst::ICMP_SLT; 8946 Changed = true; 8947 } 8948 break; 8949 case ICmpInst::ICMP_SGE: 8950 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8951 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8952 SCEV::FlagNSW); 8953 Pred = ICmpInst::ICMP_SGT; 8954 Changed = true; 8955 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8956 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8957 SCEV::FlagNSW); 8958 Pred = ICmpInst::ICMP_SGT; 8959 Changed = true; 8960 } 8961 break; 8962 case ICmpInst::ICMP_ULE: 8963 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8964 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8965 SCEV::FlagNUW); 8966 Pred = ICmpInst::ICMP_ULT; 8967 Changed = true; 8968 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8969 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8970 Pred = ICmpInst::ICMP_ULT; 8971 Changed = true; 8972 } 8973 break; 8974 case ICmpInst::ICMP_UGE: 8975 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8976 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8977 Pred = ICmpInst::ICMP_UGT; 8978 Changed = true; 8979 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8980 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8981 SCEV::FlagNUW); 8982 Pred = ICmpInst::ICMP_UGT; 8983 Changed = true; 8984 } 8985 break; 8986 default: 8987 break; 8988 } 8989 8990 // TODO: More simplifications are possible here. 8991 8992 // Recursively simplify until we either hit a recursion limit or nothing 8993 // changes. 8994 if (Changed) 8995 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8996 8997 return Changed; 8998 8999 trivially_true: 9000 // Return 0 == 0. 9001 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 9002 Pred = ICmpInst::ICMP_EQ; 9003 return true; 9004 9005 trivially_false: 9006 // Return 0 != 0. 
9007 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 9008 Pred = ICmpInst::ICMP_NE; 9009 return true; 9010 } 9011 9012 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9013 return getSignedRangeMax(S).isNegative(); 9014 } 9015 9016 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9017 return getSignedRangeMin(S).isStrictlyPositive(); 9018 } 9019 9020 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9021 return !getSignedRangeMin(S).isNegative(); 9022 } 9023 9024 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9025 return !getSignedRangeMax(S).isStrictlyPositive(); 9026 } 9027 9028 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9029 return isKnownNegative(S) || isKnownPositive(S); 9030 } 9031 9032 std::pair<const SCEV *, const SCEV *> 9033 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9034 // Compute SCEV on entry of loop L. 9035 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9036 if (Start == getCouldNotCompute()) 9037 return { Start, Start }; 9038 // Compute post increment SCEV for loop L. 9039 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9040 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9041 return { Start, PostInc }; 9042 } 9043 9044 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9045 const SCEV *LHS, const SCEV *RHS) { 9046 // First collect all loops. 9047 SmallPtrSet<const Loop *, 8> LoopsUsed; 9048 getUsedLoops(LHS, LoopsUsed); 9049 getUsedLoops(RHS, LoopsUsed); 9050 9051 if (LoopsUsed.empty()) 9052 return false; 9053 9054 // Domination relationship must be a linear order on collected loops. 9055 #ifndef NDEBUG 9056 for (auto *L1 : LoopsUsed) 9057 for (auto *L2 : LoopsUsed) 9058 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9059 DT.dominates(L2->getHeader(), L1->getHeader())) && 9060 "Domination relationship is not a linear order"); 9061 #endif 9062 9063 const Loop *MDL = 9064 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9065 [&](const Loop *L1, const Loop *L2) { 9066 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9067 }); 9068 9069 // Get init and post increment value for LHS. 9070 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9071 // if LHS contains unknown non-invariant SCEV then bail out. 9072 if (SplitLHS.first == getCouldNotCompute()) 9073 return false; 9074 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9075 // Get init and post increment value for RHS. 9076 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9077 // if RHS contains unknown non-invariant SCEV then bail out. 9078 if (SplitRHS.first == getCouldNotCompute()) 9079 return false; 9080 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9081 // It is possible that init SCEV contains an invariant load but it does 9082 // not dominate MDL and is not available at MDL loop entry, so we should 9083 // check it here. 9084 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9085 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9086 return false; 9087 9088 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) && 9089 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9090 SplitRHS.second); 9091 } 9092 9093 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9094 const SCEV *LHS, const SCEV *RHS) { 9095 // Canonicalize the inputs first. 
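// For instance (an illustrative sketch), a query "X s>= 10" is rewritten by
// SimplifyICmpOperands into "X s> 9", so the provers below see fewer
// distinct predicate shapes.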
9096 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9097 9098 if (isKnownViaInduction(Pred, LHS, RHS)) 9099 return true; 9100 9101 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9102 return true; 9103 9104 // Otherwise see what can be done with some simple reasoning. 9105 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9106 } 9107 9108 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9109 const SCEVAddRecExpr *LHS, 9110 const SCEV *RHS) { 9111 const Loop *L = LHS->getLoop(); 9112 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9113 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9114 } 9115 9116 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9117 ICmpInst::Predicate Pred, 9118 bool &Increasing) { 9119 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9120 9121 #ifndef NDEBUG 9122 // Verify an invariant: inverting the predicate should turn a monotonically 9123 // increasing change to a monotonically decreasing one, and vice versa. 9124 bool IncreasingSwapped; 9125 bool ResultSwapped = isMonotonicPredicateImpl( 9126 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9127 9128 assert(Result == ResultSwapped && "should be able to analyze both!"); 9129 if (ResultSwapped) 9130 assert(Increasing == !IncreasingSwapped && 9131 "monotonicity should flip as we flip the predicate"); 9132 #endif 9133 9134 return Result; 9135 } 9136 9137 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9138 ICmpInst::Predicate Pred, 9139 bool &Increasing) { 9140 9141 // A zero step value for LHS means the induction variable is essentially a 9142 // loop invariant value. We don't really depend on the predicate actually 9143 // flipping from false to true (for increasing predicates, and the other way 9144 // around for decreasing predicates), all we care about is that *if* the 9145 // predicate changes then it only changes from false to true. 9146 // 9147 // A zero step value in itself is not very useful, but there may be places 9148 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9149 // as general as possible. 9150 9151 switch (Pred) { 9152 default: 9153 return false; // Conservative answer 9154 9155 case ICmpInst::ICMP_UGT: 9156 case ICmpInst::ICMP_UGE: 9157 case ICmpInst::ICMP_ULT: 9158 case ICmpInst::ICMP_ULE: 9159 if (!LHS->hasNoUnsignedWrap()) 9160 return false; 9161 9162 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9163 return true; 9164 9165 case ICmpInst::ICMP_SGT: 9166 case ICmpInst::ICMP_SGE: 9167 case ICmpInst::ICMP_SLT: 9168 case ICmpInst::ICMP_SLE: { 9169 if (!LHS->hasNoSignedWrap()) 9170 return false; 9171 9172 const SCEV *Step = LHS->getStepRecurrence(*this); 9173 9174 if (isKnownNonNegative(Step)) { 9175 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9176 return true; 9177 } 9178 9179 if (isKnownNonPositive(Step)) { 9180 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9181 return true; 9182 } 9183 9184 return false; 9185 } 9186 9187 } 9188 9189 llvm_unreachable("switch has default clause!"); 9190 } 9191 9192 bool ScalarEvolution::isLoopInvariantPredicate( 9193 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9194 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9195 const SCEV *&InvariantRHS) { 9196 9197 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
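// (E.g., a query "N u> {0,+,1}<nuw>" with loop-invariant N is handled below
// as "{0,+,1}<nuw> u< N" -- an illustrative sketch of the swap.)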
9198 if (!isLoopInvariant(RHS, L)) { 9199 if (!isLoopInvariant(LHS, L)) 9200 return false; 9201 9202 std::swap(LHS, RHS); 9203 Pred = ICmpInst::getSwappedPredicate(Pred); 9204 } 9205 9206 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9207 if (!ArLHS || ArLHS->getLoop() != L) 9208 return false; 9209 9210 bool Increasing; 9211 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9212 return false; 9213 9214 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9215 // true as the loop iterates, and the backedge is control dependent on 9216 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9217 // 9218 // * if the predicate was false in the first iteration then the predicate 9219 // is never evaluated again, since the loop exits without taking the 9220 // backedge. 9221 // * if the predicate was true in the first iteration then it will 9222 // continue to be true for all future iterations since it is 9223 // monotonically increasing. 9224 // 9225 // For both the above possibilities, we can replace the loop varying 9226 // predicate with its value on the first iteration of the loop (which is 9227 // loop invariant). 9228 // 9229 // A similar reasoning applies for a monotonically decreasing predicate, by 9230 // replacing true with false and false with true in the above two bullets. 9231 9232 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9233 9234 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9235 return false; 9236 9237 InvariantPred = Pred; 9238 InvariantLHS = ArLHS->getStart(); 9239 InvariantRHS = RHS; 9240 return true; 9241 } 9242 9243 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9244 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9245 if (HasSameValue(LHS, RHS)) 9246 return ICmpInst::isTrueWhenEqual(Pred); 9247 9248 // This code is split out from isKnownPredicate because it is called from 9249 // within isLoopEntryGuardedByCond. 9250 9251 auto CheckRanges = 9252 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9253 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9254 .contains(RangeLHS); 9255 }; 9256 9257 // The check at the top of the function catches the case where the values are 9258 // known to be equal. 9259 if (Pred == CmpInst::ICMP_EQ) 9260 return false; 9261 9262 if (Pred == CmpInst::ICMP_NE) 9263 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9264 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9265 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9266 9267 if (CmpInst::isSigned(Pred)) 9268 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9269 9270 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9271 } 9272 9273 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9274 const SCEV *LHS, 9275 const SCEV *RHS) { 9276 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9277 // Return Y via OutY. 
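// (Illustrative: matching Result = (42 + %x)<nsw> against X = SCEV(%x) with
// ExpectedFlags = FlagNSW succeeds with OutY = 42, while Result = (%y + %x)
// does not match, since no constant addend is present.)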
9278 auto MatchBinaryAddToConst =
9279 [this](const SCEV *Result, const SCEV *X, APInt &OutY,
9280 SCEV::NoWrapFlags ExpectedFlags) {
9281 const SCEV *NonConstOp, *ConstOp;
9282 SCEV::NoWrapFlags FlagsPresent;
9283
9284 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9285 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9286 return false;
9287
9288 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9289 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9290 };
9291
9292 APInt C;
9293
9294 switch (Pred) {
9295 default:
9296 break;
9297
9298 case ICmpInst::ICMP_SGE:
9299 std::swap(LHS, RHS);
9300 LLVM_FALLTHROUGH;
9301 case ICmpInst::ICMP_SLE:
9302 // X s<= (X + C)<nsw> if C >= 0
9303 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
9304 return true;
9305
9306 // (X + C)<nsw> s<= X if C <= 0
9307 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
9308 !C.isStrictlyPositive())
9309 return true;
9310 break;
9311
9312 case ICmpInst::ICMP_SGT:
9313 std::swap(LHS, RHS);
9314 LLVM_FALLTHROUGH;
9315 case ICmpInst::ICMP_SLT:
9316 // X s< (X + C)<nsw> if C > 0
9317 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
9318 C.isStrictlyPositive())
9319 return true;
9320
9321 // (X + C)<nsw> s< X if C < 0
9322 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
9323 return true;
9324 break;
9325 }
9326
9327 return false;
9328 }
9329
9330 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
9331 const SCEV *LHS,
9332 const SCEV *RHS) {
9333 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
9334 return false;
9335
9336 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
9337 // on the stack can result in exponential time complexity.
9338 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
9339
9340 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
9341 //
9342 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
9343 // isKnownPredicate. isKnownPredicate is more powerful, but also more
9344 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9345 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9346 // use isKnownPredicate later if needed.
9347 return isKnownNonNegative(RHS) &&
9348 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9349 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9350 }
9351
9352 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
9353 ICmpInst::Predicate Pred,
9354 const SCEV *LHS, const SCEV *RHS) {
9355 // No need to even try if we know the module has no guards.
9356 if (!HasGuards)
9357 return false;
9358
9359 return any_of(*BB, [&](Instruction &I) {
9360 using namespace llvm::PatternMatch;
9361
9362 Value *Condition;
9363 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9364 m_Value(Condition))) &&
9365 isImpliedCond(Pred, LHS, RHS, Condition, false);
9366 });
9367 }
9368
9369 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9370 /// protected by a conditional between LHS and RHS. This is used to
9371 /// eliminate casts.
9372 bool
9373 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9374 ICmpInst::Predicate Pred,
9375 const SCEV *LHS, const SCEV *RHS) {
9376 // Interpret a null as meaning no loop, where there is obviously no guard
9377 // (interprocedural conditions notwithstanding).
9378 if (!L) return true;
9379
9380 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9381 return true;
9382
9383 BasicBlock *Latch = L->getLoopLatch();
9384 if (!Latch)
9385 return false;
9386
9387 BranchInst *LoopContinuePredicate =
9388 dyn_cast<BranchInst>(Latch->getTerminator());
9389 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9390 isImpliedCond(Pred, LHS, RHS,
9391 LoopContinuePredicate->getCondition(),
9392 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9393 return true;
9394
9395 // We don't want more than one activation of the following loops on the stack
9396 // -- that can lead to O(n!) time complexity.
9397 if (WalkingBEDominatingConds)
9398 return false;
9399
9400 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9401
9402 // See if we can exploit a trip count to prove the predicate.
9403 const auto &BETakenInfo = getBackedgeTakenInfo(L);
9404 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9405 if (LatchBECount != getCouldNotCompute()) {
9406 // We know that Latch branches back to the loop header exactly
9407 // LatchBECount times. This means the backedge condition at Latch is
9408 // equivalent to "{0,+,1} u< LatchBECount".
9409 Type *Ty = LatchBECount->getType();
9410 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9411 const SCEV *LoopCounter =
9412 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9413 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9414 LatchBECount))
9415 return true;
9416 }
9417
9418 // Check conditions due to any @llvm.assume intrinsics.
9419 for (auto &AssumeVH : AC.assumptions()) {
9420 if (!AssumeVH)
9421 continue;
9422 auto *CI = cast<CallInst>(AssumeVH);
9423 if (!DT.dominates(CI, Latch->getTerminator()))
9424 continue;
9425
9426 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
9427 return true;
9428 }
9429
9430 // If the loop is not reachable from the entry block, we risk running into an
9431 // infinite loop as we walk up into the dom tree. These loops do not matter
9432 // anyway, so we just return a conservative answer when we see them.
9433 if (!DT.isReachableFromEntry(L->getHeader()))
9434 return false;
9435
9436 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9437 return true;
9438
9439 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
9440 DTN != HeaderDTN; DTN = DTN->getIDom()) {
9441 assert(DTN && "should reach the loop header before reaching the root!");
9442
9443 BasicBlock *BB = DTN->getBlock();
9444 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9445 return true;
9446
9447 BasicBlock *PBB = BB->getSinglePredecessor();
9448 if (!PBB)
9449 continue;
9450
9451 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
9452 if (!ContinuePredicate || !ContinuePredicate->isConditional())
9453 continue;
9454
9455 Value *Condition = ContinuePredicate->getCondition();
9456
9457 // If we have an edge `E` within the loop body that dominates the only
9458 // latch, the condition guarding `E` also guards the backedge. This
9459 // reasoning works only for loops with a single latch.
9460
9461 BasicBlockEdge DominatingEdge(PBB, BB);
9462 if (DominatingEdge.isSingleEdge()) {
9463 // We're constructively (and conservatively) enumerating edges within the
9464 // loop body that dominate the latch.
The dominator tree better agree 9465 // with us on this: 9466 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9467 9468 if (isImpliedCond(Pred, LHS, RHS, Condition, 9469 BB != ContinuePredicate->getSuccessor(0))) 9470 return true; 9471 } 9472 } 9473 9474 return false; 9475 } 9476 9477 bool 9478 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9479 ICmpInst::Predicate Pred, 9480 const SCEV *LHS, const SCEV *RHS) { 9481 // Interpret a null as meaning no loop, where there is obviously no guard 9482 // (interprocedural conditions notwithstanding). 9483 if (!L) return false; 9484 9485 // Both LHS and RHS must be available at loop entry. 9486 assert(isAvailableAtLoopEntry(LHS, L) && 9487 "LHS is not available at Loop Entry"); 9488 assert(isAvailableAtLoopEntry(RHS, L) && 9489 "RHS is not available at Loop Entry"); 9490 9491 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9492 return true; 9493 9494 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9495 // the facts (a >= b && a != b) separately. A typical situation is when the 9496 // non-strict comparison is known from ranges and non-equality is known from 9497 // dominating predicates. If we are proving strict comparison, we always try 9498 // to prove non-equality and non-strict comparison separately. 9499 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9500 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9501 bool ProvedNonStrictComparison = false; 9502 bool ProvedNonEquality = false; 9503 9504 if (ProvingStrictComparison) { 9505 ProvedNonStrictComparison = 9506 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9507 ProvedNonEquality = 9508 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9509 if (ProvedNonStrictComparison && ProvedNonEquality) 9510 return true; 9511 } 9512 9513 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9514 auto ProveViaGuard = [&](BasicBlock *Block) { 9515 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9516 return true; 9517 if (ProvingStrictComparison) { 9518 if (!ProvedNonStrictComparison) 9519 ProvedNonStrictComparison = 9520 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9521 if (!ProvedNonEquality) 9522 ProvedNonEquality = 9523 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9524 if (ProvedNonStrictComparison && ProvedNonEquality) 9525 return true; 9526 } 9527 return false; 9528 }; 9529 9530 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9531 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9532 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9533 return true; 9534 if (ProvingStrictComparison) { 9535 if (!ProvedNonStrictComparison) 9536 ProvedNonStrictComparison = 9537 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9538 if (!ProvedNonEquality) 9539 ProvedNonEquality = 9540 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9541 if (ProvedNonStrictComparison && ProvedNonEquality) 9542 return true; 9543 } 9544 return false; 9545 }; 9546 9547 // Starting at the loop predecessor, climb up the predecessor chain, as long 9548 // as there are predecessors that can be found that have unique successors 9549 // leading to the original header. 
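// An illustrative shape of what this walk can prove (hypothetical IR):
//
//   entry:      br i1 (icmp slt i32 %n, 1), label %exit, label %preheader
//   preheader:  br label %header
//
// Starting from the loop predecessor (%preheader) we climb to %entry, whose
// conditional branch reaches the header only on its false edge, so the
// negated condition "%n s>= 1" is known on loop entry (this is the
// Inverse == true case in ProveViaCond).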
9550 for (std::pair<BasicBlock *, BasicBlock *>
9551 Pair(L->getLoopPredecessor(), L->getHeader());
9552 Pair.first;
9553 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
9554
9555 if (ProveViaGuard(Pair.first))
9556 return true;
9557
9558 BranchInst *LoopEntryPredicate =
9559 dyn_cast<BranchInst>(Pair.first->getTerminator());
9560 if (!LoopEntryPredicate ||
9561 LoopEntryPredicate->isUnconditional())
9562 continue;
9563
9564 if (ProveViaCond(LoopEntryPredicate->getCondition(),
9565 LoopEntryPredicate->getSuccessor(0) != Pair.second))
9566 return true;
9567 }
9568
9569 // Check conditions due to any @llvm.assume intrinsics.
9570 for (auto &AssumeVH : AC.assumptions()) {
9571 if (!AssumeVH)
9572 continue;
9573 auto *CI = cast<CallInst>(AssumeVH);
9574 if (!DT.dominates(CI, L->getHeader()))
9575 continue;
9576
9577 if (ProveViaCond(CI->getArgOperand(0), false))
9578 return true;
9579 }
9580
9581 return false;
9582 }
9583
9584 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
9585 const SCEV *LHS, const SCEV *RHS,
9586 Value *FoundCondValue,
9587 bool Inverse) {
9588 if (!PendingLoopPredicates.insert(FoundCondValue).second)
9589 return false;
9590
9591 auto ClearOnExit =
9592 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
9593
9594 // Recursively handle And and Or conditions.
9595 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
9596 if (BO->getOpcode() == Instruction::And) {
9597 if (!Inverse)
9598 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9599 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9600 } else if (BO->getOpcode() == Instruction::Or) {
9601 if (Inverse)
9602 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9603 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9604 }
9605 }
9606
9607 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
9608 if (!ICI) return false;
9609
9610 // We have found a conditional branch that dominates the loop or controls
9611 // the loop latch. Check to see if it is the comparison we are looking for.
9612 ICmpInst::Predicate FoundPred;
9613 if (Inverse)
9614 FoundPred = ICI->getInversePredicate();
9615 else
9616 FoundPred = ICI->getPredicate();
9617
9618 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
9619 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
9620
9621 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
9622 }
9623
9624 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
9625 const SCEV *RHS,
9626 ICmpInst::Predicate FoundPred,
9627 const SCEV *FoundLHS,
9628 const SCEV *FoundRHS) {
9629 // Balance the types.
9630 if (getTypeSizeInBits(LHS->getType()) < 9631 getTypeSizeInBits(FoundLHS->getType())) { 9632 if (CmpInst::isSigned(Pred)) { 9633 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9634 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9635 } else { 9636 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9637 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9638 } 9639 } else if (getTypeSizeInBits(LHS->getType()) > 9640 getTypeSizeInBits(FoundLHS->getType())) { 9641 if (CmpInst::isSigned(FoundPred)) { 9642 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9643 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9644 } else { 9645 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9646 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9647 } 9648 } 9649 9650 // Canonicalize the query to match the way instcombine will have 9651 // canonicalized the comparison. 9652 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9653 if (LHS == RHS) 9654 return CmpInst::isTrueWhenEqual(Pred); 9655 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9656 if (FoundLHS == FoundRHS) 9657 return CmpInst::isFalseWhenEqual(FoundPred); 9658 9659 // Check to see if we can make the LHS or RHS match. 9660 if (LHS == FoundRHS || RHS == FoundLHS) { 9661 if (isa<SCEVConstant>(RHS)) { 9662 std::swap(FoundLHS, FoundRHS); 9663 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9664 } else { 9665 std::swap(LHS, RHS); 9666 Pred = ICmpInst::getSwappedPredicate(Pred); 9667 } 9668 } 9669 9670 // Check whether the found predicate is the same as the desired predicate. 9671 if (FoundPred == Pred) 9672 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9673 9674 // Check whether swapping the found predicate makes it the same as the 9675 // desired predicate. 9676 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9677 if (isa<SCEVConstant>(RHS)) 9678 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 9679 else 9680 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 9681 RHS, LHS, FoundLHS, FoundRHS); 9682 } 9683 9684 // Unsigned comparison is the same as signed comparison when both the operands 9685 // are non-negative. 9686 if (CmpInst::isUnsigned(FoundPred) && 9687 CmpInst::getSignedPredicate(FoundPred) == Pred && 9688 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9689 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9690 9691 // Check if we can make progress by sharpening ranges. 9692 if (FoundPred == ICmpInst::ICMP_NE && 9693 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9694 9695 const SCEVConstant *C = nullptr; 9696 const SCEV *V = nullptr; 9697 9698 if (isa<SCEVConstant>(FoundLHS)) { 9699 C = cast<SCEVConstant>(FoundLHS); 9700 V = FoundRHS; 9701 } else { 9702 C = cast<SCEVConstant>(FoundRHS); 9703 V = FoundLHS; 9704 } 9705 9706 // The guarding predicate tells us that C != V. If the known range 9707 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9708 // range we consider has to correspond to same signedness as the 9709 // predicate we're interested in folding. 9710 9711 APInt Min = ICmpInst::isSigned(Pred) ? 9712 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9713 9714 if (Min == C->getAPInt()) { 9715 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9716 // This is true even if (Min + 1) wraps around -- in case of 9717 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 
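// Worked instance (a sketch): if the unsigned minimum of V is known to be 3
// and the guard provides V != 3, then V u>= 4, and proving "LHS `Pred` RHS"
// from "V u>= 4" below may succeed where "V u>= 3" was too weak.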
9718 9719 APInt SharperMin = Min + 1; 9720 9721 switch (Pred) { 9722 case ICmpInst::ICMP_SGE: 9723 case ICmpInst::ICMP_UGE: 9724 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9725 // RHS, we're done. 9726 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9727 getConstant(SharperMin))) 9728 return true; 9729 LLVM_FALLTHROUGH; 9730 9731 case ICmpInst::ICMP_SGT: 9732 case ICmpInst::ICMP_UGT: 9733 // We know from the range information that (V `Pred` Min || 9734 // V == Min). We know from the guarding condition that !(V 9735 // == Min). This gives us 9736 // 9737 // V `Pred` Min || V == Min && !(V == Min) 9738 // => V `Pred` Min 9739 // 9740 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9741 9742 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9743 return true; 9744 LLVM_FALLTHROUGH; 9745 9746 default: 9747 // No change 9748 break; 9749 } 9750 } 9751 } 9752 9753 // Check whether the actual condition is beyond sufficient. 9754 if (FoundPred == ICmpInst::ICMP_EQ) 9755 if (ICmpInst::isTrueWhenEqual(Pred)) 9756 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9757 return true; 9758 if (Pred == ICmpInst::ICMP_NE) 9759 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9760 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9761 return true; 9762 9763 // Otherwise assume the worst. 9764 return false; 9765 } 9766 9767 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9768 const SCEV *&L, const SCEV *&R, 9769 SCEV::NoWrapFlags &Flags) { 9770 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9771 if (!AE || AE->getNumOperands() != 2) 9772 return false; 9773 9774 L = AE->getOperand(0); 9775 R = AE->getOperand(1); 9776 Flags = AE->getNoWrapFlags(); 9777 return true; 9778 } 9779 9780 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9781 const SCEV *Less) { 9782 // We avoid subtracting expressions here because this function is usually 9783 // fairly deep in the call stack (i.e. is called many times). 9784 9785 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9786 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9787 const auto *MAR = cast<SCEVAddRecExpr>(More); 9788 9789 if (LAR->getLoop() != MAR->getLoop()) 9790 return None; 9791 9792 // We look at affine expressions only; not for correctness but to keep 9793 // getStepRecurrence cheap. 9794 if (!LAR->isAffine() || !MAR->isAffine()) 9795 return None; 9796 9797 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9798 return None; 9799 9800 Less = LAR->getStart(); 9801 More = MAR->getStart(); 9802 9803 // fall through 9804 } 9805 9806 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9807 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9808 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9809 return M - L; 9810 } 9811 9812 SCEV::NoWrapFlags Flags; 9813 const SCEV *LLess = nullptr, *RLess = nullptr; 9814 const SCEV *LMore = nullptr, *RMore = nullptr; 9815 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9816 // Compare (X + C1) vs X. 9817 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9818 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9819 if (RLess == More) 9820 return -(C1->getAPInt()); 9821 9822 // Compare X vs (X + C2). 9823 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9824 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9825 if (RMore == Less) 9826 return C2->getAPInt(); 9827 9828 // Compare (X + C1) vs (X + C2). 
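// (e.g. Less = (2 + %x) and More = (10 + %x) give C2 - C1 = 8 -- a sketch.)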
9829 if (C1 && C2 && RLess == RMore) 9830 return C2->getAPInt() - C1->getAPInt(); 9831 9832 return None; 9833 } 9834 9835 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9836 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9837 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9838 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9839 return false; 9840 9841 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9842 if (!AddRecLHS) 9843 return false; 9844 9845 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9846 if (!AddRecFoundLHS) 9847 return false; 9848 9849 // We'd like to let SCEV reason about control dependencies, so we constrain 9850 // both the inequalities to be about add recurrences on the same loop. This 9851 // way we can use isLoopEntryGuardedByCond later. 9852 9853 const Loop *L = AddRecFoundLHS->getLoop(); 9854 if (L != AddRecLHS->getLoop()) 9855 return false; 9856 9857 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9858 // 9859 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9860 // ... (2) 9861 // 9862 // Informal proof for (2), assuming (1) [*]: 9863 // 9864 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9865 // 9866 // Then 9867 // 9868 // FoundLHS s< FoundRHS s< INT_MIN - C 9869 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9870 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9871 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9872 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9873 // <=> FoundLHS + C s< FoundRHS + C 9874 // 9875 // [*]: (1) can be proved by ruling out overflow. 9876 // 9877 // [**]: This can be proved by analyzing all the four possibilities: 9878 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9879 // (A s>= 0, B s>= 0). 9880 // 9881 // Note: 9882 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9883 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9884 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9885 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9886 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9887 // C)". 9888 9889 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9890 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9891 if (!LDiff || !RDiff || *LDiff != *RDiff) 9892 return false; 9893 9894 if (LDiff->isMinValue()) 9895 return true; 9896 9897 APInt FoundRHSLimit; 9898 9899 if (Pred == CmpInst::ICMP_ULT) { 9900 FoundRHSLimit = -(*RDiff); 9901 } else { 9902 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9903 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9904 } 9905 9906 // Try to prove (1) or (2), as needed. 
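// Concrete i8 instance of (1), as a sketch: with LDiff = RDiff = C = 3,
// FoundRHSLimit is -3 == 253, so we ask whether "FoundRHS u< 253" guards the
// loop entry; if it does, both sides of "FoundLHS u< FoundRHS" can be
// shifted by 3 without wrapping, yielding "LHS u< RHS".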
9907 return isAvailableAtLoopEntry(FoundRHS, L) &&
9908 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
9909 getConstant(FoundRHSLimit));
9910 }
9911
9912 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
9913 const SCEV *LHS, const SCEV *RHS,
9914 const SCEV *FoundLHS,
9915 const SCEV *FoundRHS, unsigned Depth) {
9916 const PHINode *LPhi = nullptr, *RPhi = nullptr;
9917
9918 auto ClearOnExit = make_scope_exit([&]() {
9919 if (LPhi) {
9920 bool Erased = PendingMerges.erase(LPhi);
9921 assert(Erased && "Failed to erase LPhi!");
9922 (void)Erased;
9923 }
9924 if (RPhi) {
9925 bool Erased = PendingMerges.erase(RPhi);
9926 assert(Erased && "Failed to erase RPhi!");
9927 (void)Erased;
9928 }
9929 });
9930
9931 // Find the respective Phis and check that they are not already pending.
9932 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
9933 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
9934 if (!PendingMerges.insert(Phi).second)
9935 return false;
9936 LPhi = Phi;
9937 }
9938 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
9939 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
9940 // If we detect a loop of Phi nodes being processed by this method, for
9941 // example:
9942 //
9943 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
9944 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
9945 //
9946 // we don't want to deal with a case that complex, so we return the
9947 // conservative answer false.
9948 if (!PendingMerges.insert(Phi).second)
9949 return false;
9950 RPhi = Phi;
9951 }
9952
9953 // If neither LHS nor RHS is a Phi, there is nothing to do here.
9954 if (!LPhi && !RPhi)
9955 return false;
9956
9957 // If there is a SCEVUnknown Phi we are interested in, make it left.
9958 if (!LPhi) {
9959 std::swap(LHS, RHS);
9960 std::swap(FoundLHS, FoundRHS);
9961 std::swap(LPhi, RPhi);
9962 Pred = ICmpInst::getSwappedPredicate(Pred);
9963 }
9964
9965 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
9966 const BasicBlock *LBB = LPhi->getParent();
9967 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
9968
9969 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
9970 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
9971 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
9972 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
9973 };
9974
9975 if (RPhi && RPhi->getParent() == LBB) {
9976 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
9977 // If we compare two Phis from the same block, and for each predecessor
9978 // block the predicate is true for the incoming values from that block,
9979 // then the predicate is also true for the Phis.
9980 for (const BasicBlock *IncBB : predecessors(LBB)) {
9981 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
9982 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
9983 if (!ProvedEasily(L, R))
9984 return false;
9985 }
9986 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
9987 // Case two: RHS is an AddRec whose loop has LBB as its header. This means
9988 // that the loop has both an AddRec and a SCEVUnknown Phi, so we can compare
9989 // the incoming values of the AddRec from above the loop and from the latch
9990 // with the respective incoming values of LPhi.
9991 // TODO: Generalize to handle loops with many inputs in a header.
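// The shape handled here, as an illustrative sketch:
//
//   header: %lphi = phi i32 [ %init, %preheader ], [ %next, %latch ]
//
// with RHS = {Start,+,Step} on the same loop; below we compare %init with
// Start and %next with the post-increment value {Start+Step,+,Step}.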
9992 if (LPhi->getNumIncomingValues() != 2) return false;
9993
9994 auto *RLoop = RAR->getLoop();
9995 auto *Predecessor = RLoop->getLoopPredecessor();
9996 assert(Predecessor && "Loop with AddRec with no predecessor?");
9997 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
9998 if (!ProvedEasily(L1, RAR->getStart()))
9999 return false;
10000 auto *Latch = RLoop->getLoopLatch();
10001 assert(Latch && "Loop with AddRec with no latch?");
10002 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
10003 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
10004 return false;
10005 } else {
10006 // In all other cases, go over the inputs of LHS and compare each of them
10007 // to RHS: the predicate is true for (LHS, RHS) if it is true for all such
10008 // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
10009 // block different from LBB.
10010 for (const BasicBlock *IncBB : predecessors(LBB)) {
10011 // Check that RHS is available in this block.
10012 if (!dominates(RHS, IncBB))
10013 return false;
10014 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10015 if (!ProvedEasily(L, RHS))
10016 return false;
10017 }
10018 }
10019 return true;
10020 }
10021
10022 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
10023 const SCEV *LHS, const SCEV *RHS,
10024 const SCEV *FoundLHS,
10025 const SCEV *FoundRHS) {
10026 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
10027 return true;
10028
10029 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
10030 return true;
10031
10032 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
10033 FoundLHS, FoundRHS) ||
10034 // ~x < ~y --> x > y
10035 isImpliedCondOperandsHelper(Pred, LHS, RHS,
10036 getNotSCEV(FoundRHS),
10037 getNotSCEV(FoundLHS));
10038 }
10039
10040 /// If Expr computes ~A, return A; otherwise return nullptr.
10041 static const SCEV *MatchNotExpr(const SCEV *Expr) {
10042 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
10043 if (!Add || Add->getNumOperands() != 2 ||
10044 !Add->getOperand(0)->isAllOnesValue())
10045 return nullptr;
10046
10047 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
10048 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
10049 !AddRHS->getOperand(0)->isAllOnesValue())
10050 return nullptr;
10051
10052 return AddRHS->getOperand(1);
10053 }
10054
10055 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
10056 template<typename MaxExprType>
10057 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
10058 const SCEV *Candidate) {
10059 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
10060 if (!MaxExpr) return false;
10061
10062 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
10063 }
10064
10065 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
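/// There is no dedicated min expression in SCEV here, so a min is recognized
/// through the identity min(A, B) == ~max(~A, ~B) with ~X == -1 - X; e.g.
/// (a sketch) smin(%a, 7) appears as (-1 + (-1 * smax((-1 + (-1 * %a)), -8))).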
10066 template<typename MaxExprType>
10067 static bool IsMinConsistingOf(ScalarEvolution &SE,
10068 const SCEV *MaybeMinExpr,
10069 const SCEV *Candidate) {
10070 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
10071 if (!MaybeMaxExpr)
10072 return false;
10073
10074 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
10075 }
10076
10077 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
10078 ICmpInst::Predicate Pred,
10079 const SCEV *LHS, const SCEV *RHS) {
10080 // If both sides are affine addrecs for the same loop, with equal
10081 // steps, and we know the recurrences don't wrap, then we only
10082 // need to check the predicate on the starting values.
10083
10084 if (!ICmpInst::isRelational(Pred))
10085 return false;
10086
10087 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
10088 if (!LAR)
10089 return false;
10090 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10091 if (!RAR)
10092 return false;
10093 if (LAR->getLoop() != RAR->getLoop())
10094 return false;
10095 if (!LAR->isAffine() || !RAR->isAffine())
10096 return false;
10097
10098 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
10099 return false;
10100
10101 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
10102 SCEV::FlagNSW : SCEV::FlagNUW;
10103 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
10104 return false;
10105
10106 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
10107 }
10108
10109 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
10110 /// expression?
10111 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
10112 ICmpInst::Predicate Pred,
10113 const SCEV *LHS, const SCEV *RHS) {
10114 switch (Pred) {
10115 default:
10116 return false;
10117
10118 case ICmpInst::ICMP_SGE:
10119 std::swap(LHS, RHS);
10120 LLVM_FALLTHROUGH;
10121 case ICmpInst::ICMP_SLE:
10122 return
10123 // min(A, ...) <= A
10124 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
10125 // A <= max(A, ...)
10126 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
10127
10128 case ICmpInst::ICMP_UGE:
10129 std::swap(LHS, RHS);
10130 LLVM_FALLTHROUGH;
10131 case ICmpInst::ICMP_ULE:
10132 return
10133 // min(A, ...) <= A
10134 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
10135 // A <= max(A, ...)
10136 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
10137 }
10138
10139 llvm_unreachable("covered switch fell through?!");
10140 }
10141
10142 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
10143 const SCEV *LHS, const SCEV *RHS,
10144 const SCEV *FoundLHS,
10145 const SCEV *FoundRHS,
10146 unsigned Depth) {
10147 assert(getTypeSizeInBits(LHS->getType()) ==
10148 getTypeSizeInBits(RHS->getType()) &&
10149 "LHS and RHS have different sizes?");
10150 assert(getTypeSizeInBits(FoundLHS->getType()) ==
10151 getTypeSizeInBits(FoundRHS->getType()) &&
10152 "FoundLHS and FoundRHS have different sizes?");
10153 // We want to avoid hurting compile time with the analysis of very large
10154 // trees, so we bound the recursion depth.
10154 if (Depth > MaxSCEVOperationsImplicationDepth)
10155 return false;
10156 // We only want to work with the ICMP_SGT comparison so far.
10157 // TODO: Extend to ICMP_UGT?
10158 if (Pred == ICmpInst::ICMP_SLT) {
10159 Pred = ICmpInst::ICMP_SGT;
10160 std::swap(LHS, RHS);
10161 std::swap(FoundLHS, FoundRHS);
10162 }
10163 if (Pred != ICmpInst::ICMP_SGT)
10164 return false;
10165
10166 auto GetOpFromSExt = [&](const SCEV *S) {
10167 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
10168 return Ext->getOperand();
10169 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
10170 // the constant in some cases.
10171 return S;
10172 };
10173
10174 // Acquire values from extensions.
10175 auto *OrigLHS = LHS;
10176 auto *OrigFoundLHS = FoundLHS;
10177 LHS = GetOpFromSExt(LHS);
10178 FoundLHS = GetOpFromSExt(FoundLHS);
10179
10180 // Check if the SGT predicate can be proved trivially or using found context.
10181 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
10182 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
10183 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
10184 FoundRHS, Depth + 1);
10185 };
10186
10187 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10188 // We want to avoid creation of any new non-constant SCEV. Since we are
10189 // going to compare the operands to RHS, we should be certain that we don't
10190 // need any size extensions for this. So let's decline all cases when the
10191 // sizes of types of LHS and RHS do not match.
10192 // TODO: Maybe try to get RHS from sext to catch more cases?
10193 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
10194 return false;
10195
10196 // Should not overflow.
10197 if (!LHSAddExpr->hasNoSignedWrap())
10198 return false;
10199
10200 auto *LL = LHSAddExpr->getOperand(0);
10201 auto *LR = LHSAddExpr->getOperand(1);
10202 auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
10203
10204 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
10205 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
10206 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
10207 };
10208 // Try to prove the following rule:
10209 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
10210 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
10211 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
10212 return true;
10213 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
10214 Value *LL, *LR;
10215 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
10216
10217 using namespace llvm::PatternMatch;
10218
10219 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
10220 // Rules for division.
10221 // We are going to perform some comparisons with Denominator and its
10222 // derivative expressions. In the general case, creating a SCEV for it may
10223 // lead to a complex analysis of the entire graph, and in particular it
10224 // can request trip count recalculation for the same loop. Such a request
10225 // would be cached as SCEVCouldNotCompute to break the infinite recursion.
10226 // To avoid this, we only want to create SCEVs that are constants in this
10227 // section. So we bail if Denominator is not a constant.
10228 if (!isa<ConstantInt>(LR))
10229 return false;
10230
10231 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
10232
10233 // We want to make sure that LHS = FoundLHS / Denominator. If so,
10234 // then a SCEV for the numerator already exists and matches FoundLHS.
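// (E.g., for a hypothetical "%q = sdiv i32 %n, 4": LL is %n, LR is 4, and we
// require the already-existing SCEV of %n to be FoundLHS.)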
10235 auto *Numerator = getExistingSCEV(LL);
10236 if (!Numerator || Numerator->getType() != FoundLHS->getType())
10237 return false;
10238
10239 // Make sure that the numerator matches with FoundLHS and the denominator
10240 // is positive.
10241 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
10242 return false;
10243
10244 auto *DTy = Denominator->getType();
10245 auto *FRHSTy = FoundRHS->getType();
10246 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
10247 // One of the types is a pointer and the other is not. We cannot extend
10248 // them properly to a wider type, so let us just reject this case.
10249 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
10250 // to avoid this check.
10251 return false;
10252
10253 // Given that:
10254 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
10255 auto *WTy = getWiderType(DTy, FRHSTy);
10256 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
10257 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
10258
10259 // Try to prove the following rule:
10260 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
10261 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
10262 // divide it by Denominator < 4, we will have at least 1.
10263 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
10264 if (isKnownNonPositive(RHS) &&
10265 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
10266 return true;
10267
10268 // Try to prove the following rule:
10269 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
10270 // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
10271 // divide it by Denominator > 2, then:
10272 // 1. If FoundLHS is negative, then the result is 0.
10273 // 2. If FoundLHS is non-negative, then the result is non-negative.
10274 // Either way, the result is non-negative.
10275 auto *MinusOne = getNegativeSCEV(getOne(WTy));
10276 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10277 if (isKnownNegative(RHS) &&
10278 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10279 return true;
10280 }
10281 }
10282
10283 // If our expression contained SCEVUnknown Phis, and we split it down and now
10284 // need to prove something for them, try to prove the predicate for all
10285 // possible incoming values of those Phis.
10286 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10287 return true;
10288
10289 return false;
10290 }
10291
10292 bool
10293 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
10294 const SCEV *LHS, const SCEV *RHS) {
10295 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10296 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
10297 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
10298 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10299 }
10300
10301 bool
10302 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
10303 const SCEV *LHS, const SCEV *RHS,
10304 const SCEV *FoundLHS,
10305 const SCEV *FoundRHS) {
10306 switch (Pred) {
10307 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
10308 case ICmpInst::ICMP_EQ:
10309 case ICmpInst::ICMP_NE:
10310 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
10311 return true;
10312 break;
10313 case ICmpInst::ICMP_SLT:
10314 case ICmpInst::ICMP_SLE:
10315 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
10316 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
10317 return true;
10318 break;
10319 case ICmpInst::ICMP_SGT:
10320 case ICmpInst::ICMP_SGE:
10321 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
10322 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
10323 return true;
10324 break;
10325 case ICmpInst::ICMP_ULT:
10326 case ICmpInst::ICMP_ULE:
10327 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
10328 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
10329 return true;
10330 break;
10331 case ICmpInst::ICMP_UGT:
10332 case ICmpInst::ICMP_UGE:
10333 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
10334 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10335 return true;
10336 break;
10337 }
10338
10339 // Maybe it can be proved via operations?
10340 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10341 return true;
10342
10343 return false;
10344 }
10345
10346 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10347 const SCEV *LHS,
10348 const SCEV *RHS,
10349 const SCEV *FoundLHS,
10350 const SCEV *FoundRHS) {
10351 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10352 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10353 // reduce the compile time impact of this optimization.
10354 return false;
10355
10356 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
10357 if (!Addend)
10358 return false;
10359
10360 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10361
10362 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
10363 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
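// Worked i8 sketch: Pred = u<, FoundRHS = 10 gives FoundLHSRange = [0, 10);
// with Addend = 5, LHSRange = [5, 15); if RHS = 20, the satisfying region of
// "LHS u< 20" is [0, 20), which contains [5, 15), so the implication holds.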
10364 ConstantRange FoundLHSRange = 10365 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 10366 10367 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 10368 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 10369 10370 // We can also compute the range of values for `LHS` that satisfy the 10371 // consequent, "`LHS` `Pred` `RHS`": 10372 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 10373 ConstantRange SatisfyingLHSRange = 10374 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 10375 10376 // The antecedent implies the consequent if every value of `LHS` that 10377 // satisfies the antecedent also satisfies the consequent. 10378 return SatisfyingLHSRange.contains(LHSRange); 10379 } 10380 10381 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 10382 bool IsSigned, bool NoWrap) { 10383 assert(isKnownPositive(Stride) && "Positive stride expected!"); 10384 10385 if (NoWrap) return false; 10386 10387 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10388 const SCEV *One = getOne(Stride->getType()); 10389 10390 if (IsSigned) { 10391 APInt MaxRHS = getSignedRangeMax(RHS); 10392 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 10393 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10394 10395 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 10396 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 10397 } 10398 10399 APInt MaxRHS = getUnsignedRangeMax(RHS); 10400 APInt MaxValue = APInt::getMaxValue(BitWidth); 10401 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10402 10403 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 10404 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10405 } 10406 10407 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10408 bool IsSigned, bool NoWrap) { 10409 if (NoWrap) return false; 10410 10411 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10412 const SCEV *One = getOne(Stride->getType()); 10413 10414 if (IsSigned) { 10415 APInt MinRHS = getSignedRangeMin(RHS); 10416 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10417 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10418 10419 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10420 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10421 } 10422 10423 APInt MinRHS = getUnsignedRangeMin(RHS); 10424 APInt MinValue = APInt::getMinValue(BitWidth); 10425 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10426 10427 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10428 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10429 } 10430 10431 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10432 bool Equality) { 10433 const SCEV *One = getOne(Step->getType()); 10434 Delta = Equality ? getAddExpr(Delta, Step) 10435 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10436 return getUDivExpr(Delta, Step); 10437 } 10438 10439 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10440 const SCEV *Stride, 10441 const SCEV *End, 10442 unsigned BitWidth, 10443 bool IsSigned) { 10444 10445 assert(!isKnownNonPositive(Stride) && 10446 "Stride is expected strictly positive!"); 10447 // Calculate the maximum backedge count based on the range of values 10448 // permitted by Start, End, and Stride. 10449 const SCEV *MaxBECount; 10450 APInt MinStart = 10451 IsSigned ? 
getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
10452
10453 APInt StrideForMaxBECount =
10454 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
10455
10456 // We already know that the stride is positive, so we paper over conservatism
10457 // in our range computation by forcing StrideForMaxBECount to be at least one.
10458 // In theory this is unnecessary, but we expect MaxBECount to be a
10459 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
10460 // is nothing to constant fold it to).
10461 APInt One(BitWidth, 1, IsSigned);
10462 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);
10463
10464 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
10465 : APInt::getMaxValue(BitWidth);
10466 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
10467
10468 // Although End can be a MAX expression we estimate MaxEnd considering only
10469 // the case End = RHS of the loop termination condition. This is safe because
10470 // in the other case (End - Start) is zero, leading to a zero maximum backedge
10471 // taken count.
10472 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
10473 : APIntOps::umin(getUnsignedRangeMax(End), Limit);
10474
10475 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
10476 getConstant(StrideForMaxBECount) /* Step */,
10477 false /* Equality */);
10478
10479 return MaxBECount;
10480 }
10481
10482 ScalarEvolution::ExitLimit
10483 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
10484 const Loop *L, bool IsSigned,
10485 bool ControlsExit, bool AllowPredicates) {
10486 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10487
10488 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
10489 bool PredicatedIV = false;
10490
10491 if (!IV && AllowPredicates) {
10492 // Try to make this an AddRec using runtime tests, in the first X
10493 // iterations of this loop, where X is the SCEV expression found by the
10494 // algorithm below.
10495 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10496 PredicatedIV = true;
10497 }
10498
10499 // Avoid weird loops.
10500 if (!IV || IV->getLoop() != L || !IV->isAffine())
10501 return getCouldNotCompute();
10502
10503 bool NoWrap = ControlsExit &&
10504 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
10505
10506 const SCEV *Stride = IV->getStepRecurrence(*this);
10507
10508 bool PositiveStride = isKnownPositive(Stride);
10509
10510 // Avoid negative or zero stride values.
10511 if (!PositiveStride) {
10512 // We can compute the correct backedge taken count for loops with unknown
10513 // strides if we can prove that the loop is not an infinite loop with side
10514 // effects. Here's the loop structure we are trying to handle -
10515 //
10516 // i = start
10517 // do {
10518 // A[i] = i;
10519 // i += s;
10520 // } while (i < end);
10521 //
10522 // The backedge taken count for such loops is evaluated as -
10523 // (max(end, start + stride) - start - 1) /u stride
10524 //
10525 // The additional preconditions that we need to check to prove correctness
10526 // of the above formula are as follows -
10527 //
10528 // a) IV is either nuw or nsw depending upon signedness (indicated by the
10529 // NoWrap flag).
10530 // b) the loop has a single exit and no side effects.
10531 //
10532 //
10533 // Precondition a) implies that if the stride is negative, this is a single
10534 // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero;
    // otherwise we would have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    //   unsigned char i;
    //   for (i = 127; i < 128; i += 129)
    //     A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the
  // end bound of the loop (RHS), and the fact that IV does not overflow (which
  // is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times, with (End-Start) rounded up to a multiple of
  // Stride; here Start is the LHS value of the less-than comparison the first
  // time it is evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not, we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count:
  // if the backedge is taken at least once, max(End,Start) is End and the
  // result is as above; if not, max(End,Start) is Start and we get a backedge
  // count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only the form IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence of
  // undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression, we estimate MinEnd considering only
  // the case End = RHS.
This is safe because in the other case (Start - End) 10681 // is zero, leading to a zero maximum backedge taken count. 10682 APInt MinEnd = 10683 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10684 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10685 10686 10687 const SCEV *MaxBECount = getCouldNotCompute(); 10688 if (isa<SCEVConstant>(BECount)) 10689 MaxBECount = BECount; 10690 else 10691 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 10692 getConstant(MinStride), false); 10693 10694 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10695 MaxBECount = BECount; 10696 10697 return ExitLimit(BECount, MaxBECount, false, Predicates); 10698 } 10699 10700 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10701 ScalarEvolution &SE) const { 10702 if (Range.isFullSet()) // Infinite loop. 10703 return SE.getCouldNotCompute(); 10704 10705 // If the start is a non-zero constant, shift the range to simplify things. 10706 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10707 if (!SC->getValue()->isZero()) { 10708 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10709 Operands[0] = SE.getZero(SC->getType()); 10710 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10711 getNoWrapFlags(FlagNW)); 10712 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10713 return ShiftedAddRec->getNumIterationsInRange( 10714 Range.subtract(SC->getAPInt()), SE); 10715 // This is strange and shouldn't happen. 10716 return SE.getCouldNotCompute(); 10717 } 10718 10719 // The only time we can solve this is when we have all constant indices. 10720 // Otherwise, we cannot determine the overflow conditions. 10721 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10722 return SE.getCouldNotCompute(); 10723 10724 // Okay at this point we know that all elements of the chrec are constants and 10725 // that the start element is zero. 10726 10727 // First check to see if the range contains zero. If not, the first 10728 // iteration exits. 10729 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10730 if (!Range.contains(APInt(BitWidth, 0))) 10731 return SE.getZero(getType()); 10732 10733 if (isAffine()) { 10734 // If this is an affine expression then we have this situation: 10735 // Solve {0,+,A} in Range === Ax in Range 10736 10737 // We know that zero is in the range. If A is positive then we know that 10738 // the upper value of the range must be the first possible exit value. 10739 // If A is negative then the lower of the range is the last possible loop 10740 // value. Also note that we already checked for a full range. 10741 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10742 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10743 10744 // The exit value should be (End+A)/A. 10745 APInt ExitVal = (End + A).udiv(A); 10746 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10747 10748 // Evaluate at the exit value. If we really did fall out of the valid 10749 // range, then we computed our trip count, otherwise wrap around or other 10750 // things must have happened. 10751 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10752 if (Range.contains(Val->getValue())) 10753 return SE.getCouldNotCompute(); // Something strange happened 10754 10755 // Ensure that the previous value is in the range. This is a sanity check. 
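    // For example (an illustrative check, not from the original comments):
    // for {0,+,3} and Range = [0, 10), A = 3 and End = 9, so
    // ExitVal = (9 + 3) /u 3 = 4; the chrec evaluates to 12 at 4 (outside the
    // range) but to 9 at 3 (still inside), so the answer is 4.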
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we have collected a term, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
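    // (Traversal protocol, for reference: returning true from follow() asks
    // SCEVTraversal to visit the operands of S as well, while isDone() can
    // stop the whole walk early; this collector never stops early.)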
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we have found an AddRec, there is no need to
      // walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec AddRecFinder(ContainsAddRec);
          visitAll(Op, AddRecFinder);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we have collected a term, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
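/// For example (illustrative, reusing the delinearization example below):
/// given {{{%A,+,(8 * %m * %o)},+,(8 * %o)},+,8}, the collected strides are
/// (8 * %m * %o), (8 * %o) and 8, and the parametric terms gathered from them
/// include the products (8 * %m * %o) and (8 * %o).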
10932 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10933 SmallVectorImpl<const SCEV *> &Terms) { 10934 SmallVector<const SCEV *, 4> Strides; 10935 SCEVCollectStrides StrideCollector(*this, Strides); 10936 visitAll(Expr, StrideCollector); 10937 10938 LLVM_DEBUG({ 10939 dbgs() << "Strides:\n"; 10940 for (const SCEV *S : Strides) 10941 dbgs() << *S << "\n"; 10942 }); 10943 10944 for (const SCEV *S : Strides) { 10945 SCEVCollectTerms TermCollector(Terms); 10946 visitAll(S, TermCollector); 10947 } 10948 10949 LLVM_DEBUG({ 10950 dbgs() << "Terms:\n"; 10951 for (const SCEV *T : Terms) 10952 dbgs() << *T << "\n"; 10953 }); 10954 10955 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10956 visitAll(Expr, MulCollector); 10957 } 10958 10959 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10960 SmallVectorImpl<const SCEV *> &Terms, 10961 SmallVectorImpl<const SCEV *> &Sizes) { 10962 int Last = Terms.size() - 1; 10963 const SCEV *Step = Terms[Last]; 10964 10965 // End of recursion. 10966 if (Last == 0) { 10967 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10968 SmallVector<const SCEV *, 2> Qs; 10969 for (const SCEV *Op : M->operands()) 10970 if (!isa<SCEVConstant>(Op)) 10971 Qs.push_back(Op); 10972 10973 Step = SE.getMulExpr(Qs); 10974 } 10975 10976 Sizes.push_back(Step); 10977 return true; 10978 } 10979 10980 for (const SCEV *&Term : Terms) { 10981 // Normalize the terms before the next call to findArrayDimensionsRec. 10982 const SCEV *Q, *R; 10983 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10984 10985 // Bail out when GCD does not evenly divide one of the terms. 10986 if (!R->isZero()) 10987 return false; 10988 10989 Term = Q; 10990 } 10991 10992 // Remove all SCEVConstants. 10993 Terms.erase( 10994 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10995 Terms.end()); 10996 10997 if (Terms.size() > 0) 10998 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10999 return false; 11000 11001 Sizes.push_back(Step); 11002 return true; 11003 } 11004 11005 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11006 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11007 for (const SCEV *T : Terms) 11008 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 11009 return true; 11010 return false; 11011 } 11012 11013 // Return the number of product terms in S. 11014 static inline int numberOfTerms(const SCEV *S) { 11015 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11016 return Expr->getNumOperands(); 11017 return 1; 11018 } 11019 11020 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11021 if (isa<SCEVConstant>(T)) 11022 return nullptr; 11023 11024 if (isa<SCEVUnknown>(T)) 11025 return T; 11026 11027 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11028 SmallVector<const SCEV *, 2> Factors; 11029 for (const SCEV *Op : M->operands()) 11030 if (!isa<SCEVConstant>(Op)) 11031 Factors.push_back(Op); 11032 11033 return SE.getMulExpr(Factors); 11034 } 11035 11036 return T; 11037 } 11038 11039 /// Return the size of an element read or written by Inst. 
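/// For example (illustrative): for a store of a double this is a SCEV for 8,
/// i.e. sizeof(double); for an instruction that is neither a load nor a store
/// it is null.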
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting and removing constant factors:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
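      // (Reasoning, added for clarity: a remainder that still contains an
      // AddRec varies with some loop, so Expr would not be a clean multiple
      // of the element size and the subscripts derived from it would be
      // wrong.)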
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push, in the last position, the remainder of the last division: it
  // will be the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access; the remainder of the delinearization is the
/// offset of the start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///   void foo(long n, long m, long o, double A[n][m][o]) {
///
///     for (long i = 0; i < n; i++)
///       for (long j = 0; j < m; j++)
///         for (long k = 0; k < o; k++)
///           A[i][j][k] = 1.0;
///   }
///
/// the delinearization input is the following AddRec SCEV:
///
///   AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///   CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array, as these are the multiples by which the strides step:
///
///   CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of that dimension: when the
/// array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally, delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///   CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// that walks through all loads and stores of a function asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
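  // (With the A[i][j][k] example above, this collects terms such as
  // (8 * %m * %o) and (8 * %o).)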
11227 SmallVector<const SCEV *, 4> Terms; 11228 collectParametricTerms(Expr, Terms); 11229 11230 if (Terms.empty()) 11231 return; 11232 11233 // Second step: find subscript sizes. 11234 findArrayDimensions(Terms, Sizes, ElementSize); 11235 11236 if (Sizes.empty()) 11237 return; 11238 11239 // Third step: compute the access functions for each subscript. 11240 computeAccessFunctions(Expr, Subscripts, Sizes); 11241 11242 if (Subscripts.empty()) 11243 return; 11244 11245 LLVM_DEBUG({ 11246 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11247 dbgs() << "ArrayDecl[UnknownSize]"; 11248 for (const SCEV *S : Sizes) 11249 dbgs() << "[" << *S << "]"; 11250 11251 dbgs() << "\nArrayRef"; 11252 for (const SCEV *S : Subscripts) 11253 dbgs() << "[" << *S << "]"; 11254 dbgs() << "\n"; 11255 }); 11256 } 11257 11258 //===----------------------------------------------------------------------===// 11259 // SCEVCallbackVH Class Implementation 11260 //===----------------------------------------------------------------------===// 11261 11262 void ScalarEvolution::SCEVCallbackVH::deleted() { 11263 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11264 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11265 SE->ConstantEvolutionLoopExitValue.erase(PN); 11266 SE->eraseValueFromMap(getValPtr()); 11267 // this now dangles! 11268 } 11269 11270 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11271 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11272 11273 // Forget all the expressions associated with users of the old value, 11274 // so that future queries will recompute the expressions using the new 11275 // value. 11276 Value *Old = getValPtr(); 11277 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11278 SmallPtrSet<User *, 8> Visited; 11279 while (!Worklist.empty()) { 11280 User *U = Worklist.pop_back_val(); 11281 // Deleting the Old value will cause this to dangle. Postpone 11282 // that until everything else is done. 11283 if (U == Old) 11284 continue; 11285 if (!Visited.insert(U).second) 11286 continue; 11287 if (PHINode *PN = dyn_cast<PHINode>(U)) 11288 SE->ConstantEvolutionLoopExitValue.erase(PN); 11289 SE->eraseValueFromMap(U); 11290 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11291 } 11292 // Delete the Old value. 11293 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11294 SE->ConstantEvolutionLoopExitValue.erase(PN); 11295 SE->eraseValueFromMap(Old); 11296 // this now dangles! 11297 } 11298 11299 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11300 : CallbackVH(V), SE(se) {} 11301 11302 //===----------------------------------------------------------------------===// 11303 // ScalarEvolution Class Implementation 11304 //===----------------------------------------------------------------------===// 11305 11306 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11307 AssumptionCache &AC, DominatorTree &DT, 11308 LoopInfo &LI) 11309 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11310 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11311 LoopDispositions(64), BlockDispositions(64) { 11312 // To use guards for proving predicates, we need to scan every instruction in 11313 // relevant basic blocks, and not just terminators. Doing this is a waste of 11314 // time if the IR does not actually contain any calls to 11315 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 
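  // (Concretely: look up the declaration of @llvm.experimental.guard in the
  // module once and check whether it has any uses.)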
11316 // 11317 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11318 // to _add_ guards to the module when there weren't any before, and wants 11319 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11320 // efficient in lieu of being smart in that rather obscure case. 11321 11322 auto *GuardDecl = F.getParent()->getFunction( 11323 Intrinsic::getName(Intrinsic::experimental_guard)); 11324 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11325 } 11326 11327 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11328 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11329 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11330 ValueExprMap(std::move(Arg.ValueExprMap)), 11331 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11332 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11333 PendingMerges(std::move(Arg.PendingMerges)), 11334 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11335 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11336 PredicatedBackedgeTakenCounts( 11337 std::move(Arg.PredicatedBackedgeTakenCounts)), 11338 ConstantEvolutionLoopExitValue( 11339 std::move(Arg.ConstantEvolutionLoopExitValue)), 11340 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11341 LoopDispositions(std::move(Arg.LoopDispositions)), 11342 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11343 BlockDispositions(std::move(Arg.BlockDispositions)), 11344 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11345 SignedRanges(std::move(Arg.SignedRanges)), 11346 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11347 UniquePreds(std::move(Arg.UniquePreds)), 11348 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11349 LoopUsers(std::move(Arg.LoopUsers)), 11350 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11351 FirstUnknown(Arg.FirstUnknown) { 11352 Arg.FirstUnknown = nullptr; 11353 } 11354 11355 ScalarEvolution::~ScalarEvolution() { 11356 // Iterate through all the SCEVUnknown instances and call their 11357 // destructors, so that they release their references to their values. 11358 for (SCEVUnknown *U = FirstUnknown; U;) { 11359 SCEVUnknown *Tmp = U; 11360 U = U->Next; 11361 Tmp->~SCEVUnknown(); 11362 } 11363 FirstUnknown = nullptr; 11364 11365 ExprValueMap.clear(); 11366 ValueExprMap.clear(); 11367 HasRecMap.clear(); 11368 11369 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11370 // that a loop had multiple computable exits. 
11371 for (auto &BTCI : BackedgeTakenCounts) 11372 BTCI.second.clear(); 11373 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11374 BTCI.second.clear(); 11375 11376 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11377 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11378 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11379 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11380 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11381 } 11382 11383 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11384 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11385 } 11386 11387 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11388 const Loop *L) { 11389 // Print all inner loops first 11390 for (Loop *I : *L) 11391 PrintLoopInfo(OS, SE, I); 11392 11393 OS << "Loop "; 11394 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11395 OS << ": "; 11396 11397 SmallVector<BasicBlock *, 8> ExitBlocks; 11398 L->getExitBlocks(ExitBlocks); 11399 if (ExitBlocks.size() != 1) 11400 OS << "<multiple exits> "; 11401 11402 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11403 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 11404 } else { 11405 OS << "Unpredictable backedge-taken count. "; 11406 } 11407 11408 OS << "\n" 11409 "Loop "; 11410 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11411 OS << ": "; 11412 11413 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 11414 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 11415 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11416 OS << ", actual taken count either this or zero."; 11417 } else { 11418 OS << "Unpredictable max backedge-taken count. "; 11419 } 11420 11421 OS << "\n" 11422 "Loop "; 11423 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11424 OS << ": "; 11425 11426 SCEVUnionPredicate Pred; 11427 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11428 if (!isa<SCEVCouldNotCompute>(PBT)) { 11429 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11430 OS << " Predicates:\n"; 11431 Pred.print(OS, 4); 11432 } else { 11433 OS << "Unpredictable predicated backedge-taken count. "; 11434 } 11435 OS << "\n"; 11436 11437 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11438 OS << "Loop "; 11439 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11440 OS << ": "; 11441 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11442 } 11443 } 11444 11445 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11446 switch (LD) { 11447 case ScalarEvolution::LoopVariant: 11448 return "Variant"; 11449 case ScalarEvolution::LoopInvariant: 11450 return "Invariant"; 11451 case ScalarEvolution::LoopComputable: 11452 return "Computable"; 11453 } 11454 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11455 } 11456 11457 void ScalarEvolution::print(raw_ostream &OS) const { 11458 // ScalarEvolution's implementation of the print method is to print 11459 // out SCEV values of all instructions that are interesting. Doing 11460 // this potentially causes it to create new SCEV objects though, 11461 // which technically conflicts with the const qualifier. This isn't 11462 // observable from outside the class though, so casting away the 11463 // const isn't dangerous. 
11464 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11465 11466 OS << "Classifying expressions for: "; 11467 F.printAsOperand(OS, /*PrintType=*/false); 11468 OS << "\n"; 11469 for (Instruction &I : instructions(F)) 11470 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11471 OS << I << '\n'; 11472 OS << " --> "; 11473 const SCEV *SV = SE.getSCEV(&I); 11474 SV->print(OS); 11475 if (!isa<SCEVCouldNotCompute>(SV)) { 11476 OS << " U: "; 11477 SE.getUnsignedRange(SV).print(OS); 11478 OS << " S: "; 11479 SE.getSignedRange(SV).print(OS); 11480 } 11481 11482 const Loop *L = LI.getLoopFor(I.getParent()); 11483 11484 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11485 if (AtUse != SV) { 11486 OS << " --> "; 11487 AtUse->print(OS); 11488 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11489 OS << " U: "; 11490 SE.getUnsignedRange(AtUse).print(OS); 11491 OS << " S: "; 11492 SE.getSignedRange(AtUse).print(OS); 11493 } 11494 } 11495 11496 if (L) { 11497 OS << "\t\t" "Exits: "; 11498 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11499 if (!SE.isLoopInvariant(ExitValue, L)) { 11500 OS << "<<Unknown>>"; 11501 } else { 11502 OS << *ExitValue; 11503 } 11504 11505 bool First = true; 11506 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11507 if (First) { 11508 OS << "\t\t" "LoopDispositions: { "; 11509 First = false; 11510 } else { 11511 OS << ", "; 11512 } 11513 11514 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11515 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11516 } 11517 11518 for (auto *InnerL : depth_first(L)) { 11519 if (InnerL == L) 11520 continue; 11521 if (First) { 11522 OS << "\t\t" "LoopDispositions: { "; 11523 First = false; 11524 } else { 11525 OS << ", "; 11526 } 11527 11528 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11529 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11530 } 11531 11532 OS << " }"; 11533 } 11534 11535 OS << "\n"; 11536 } 11537 11538 OS << "Determining loop execution counts for: "; 11539 F.printAsOperand(OS, /*PrintType=*/false); 11540 OS << "\n"; 11541 for (Loop *I : LI) 11542 PrintLoopInfo(OS, &SE, I); 11543 } 11544 11545 ScalarEvolution::LoopDisposition 11546 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11547 auto &Values = LoopDispositions[S]; 11548 for (auto &V : Values) { 11549 if (V.getPointer() == L) 11550 return V.getInt(); 11551 } 11552 Values.emplace_back(L, LoopVariant); 11553 LoopDisposition D = computeLoopDisposition(S, L); 11554 auto &Values2 = LoopDispositions[S]; 11555 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11556 if (V.getPointer() == L) { 11557 V.setInt(D); 11558 break; 11559 } 11560 } 11561 return D; 11562 } 11563 11564 ScalarEvolution::LoopDisposition 11565 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11566 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11567 case scConstant: 11568 return LoopInvariant; 11569 case scTruncate: 11570 case scZeroExtend: 11571 case scSignExtend: 11572 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11573 case scAddRecExpr: { 11574 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11575 11576 // If L is the addrec's loop, it's computable. 11577 if (AR->getLoop() == L) 11578 return LoopComputable; 11579 11580 // Add recurrences are never invariant in the function-body (null loop). 11581 if (!L) 11582 return LoopVariant; 11583 11584 // Everything that is not defined at loop entry is variant. 
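    // (That is, if L's header dominates the addrec's loop header, the
    // addrec's loop runs only at or after entry to L, so its value is not
    // available on entry to L.)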
11585 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11586 return LoopVariant; 11587 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11588 " dominate the contained loop's header?"); 11589 11590 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11591 if (AR->getLoop()->contains(L)) 11592 return LoopInvariant; 11593 11594 // This recurrence is variant w.r.t. L if any of its operands 11595 // are variant. 11596 for (auto *Op : AR->operands()) 11597 if (!isLoopInvariant(Op, L)) 11598 return LoopVariant; 11599 11600 // Otherwise it's loop-invariant. 11601 return LoopInvariant; 11602 } 11603 case scAddExpr: 11604 case scMulExpr: 11605 case scUMaxExpr: 11606 case scSMaxExpr: { 11607 bool HasVarying = false; 11608 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11609 LoopDisposition D = getLoopDisposition(Op, L); 11610 if (D == LoopVariant) 11611 return LoopVariant; 11612 if (D == LoopComputable) 11613 HasVarying = true; 11614 } 11615 return HasVarying ? LoopComputable : LoopInvariant; 11616 } 11617 case scUDivExpr: { 11618 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11619 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11620 if (LD == LoopVariant) 11621 return LoopVariant; 11622 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11623 if (RD == LoopVariant) 11624 return LoopVariant; 11625 return (LD == LoopInvariant && RD == LoopInvariant) ? 11626 LoopInvariant : LoopComputable; 11627 } 11628 case scUnknown: 11629 // All non-instruction values are loop invariant. All instructions are loop 11630 // invariant if they are not contained in the specified loop. 11631 // Instructions are never considered invariant in the function body 11632 // (null loop) because they are defined within the "loop". 11633 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11634 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11635 return LoopInvariant; 11636 case scCouldNotCompute: 11637 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11638 } 11639 llvm_unreachable("Unknown SCEV kind!"); 11640 } 11641 11642 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11643 return getLoopDisposition(S, L) == LoopInvariant; 11644 } 11645 11646 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11647 return getLoopDisposition(S, L) == LoopComputable; 11648 } 11649 11650 ScalarEvolution::BlockDisposition 11651 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11652 auto &Values = BlockDispositions[S]; 11653 for (auto &V : Values) { 11654 if (V.getPointer() == BB) 11655 return V.getInt(); 11656 } 11657 Values.emplace_back(BB, DoesNotDominateBlock); 11658 BlockDisposition D = computeBlockDisposition(S, BB); 11659 auto &Values2 = BlockDispositions[S]; 11660 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11661 if (V.getPointer() == BB) { 11662 V.setInt(D); 11663 break; 11664 } 11665 } 11666 return D; 11667 } 11668 11669 ScalarEvolution::BlockDisposition 11670 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11671 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11672 case scConstant: 11673 return ProperlyDominatesBlock; 11674 case scTruncate: 11675 case scZeroExtend: 11676 case scSignExtend: 11677 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11678 case scAddRecExpr: { 11679 // This uses a "dominates" query instead of "properly dominates" query 11680 // to test for proper dominance too, because the instruction which 11681 // produces the addrec's value is a PHI, and a PHI effectively properly 11682 // dominates its entire containing block. 11683 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11684 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11685 return DoesNotDominateBlock; 11686 11687 // Fall through into SCEVNAryExpr handling. 11688 LLVM_FALLTHROUGH; 11689 } 11690 case scAddExpr: 11691 case scMulExpr: 11692 case scUMaxExpr: 11693 case scSMaxExpr: { 11694 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11695 bool Proper = true; 11696 for (const SCEV *NAryOp : NAry->operands()) { 11697 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11698 if (D == DoesNotDominateBlock) 11699 return DoesNotDominateBlock; 11700 if (D == DominatesBlock) 11701 Proper = false; 11702 } 11703 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11704 } 11705 case scUDivExpr: { 11706 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11707 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11708 BlockDisposition LD = getBlockDisposition(LHS, BB); 11709 if (LD == DoesNotDominateBlock) 11710 return DoesNotDominateBlock; 11711 BlockDisposition RD = getBlockDisposition(RHS, BB); 11712 if (RD == DoesNotDominateBlock) 11713 return DoesNotDominateBlock; 11714 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
        ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
ScalarEvolution "universe" to another. 11823 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 11824 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 11825 11826 const SCEV *visitConstant(const SCEVConstant *Constant) { 11827 return SE.getConstant(Constant->getAPInt()); 11828 } 11829 11830 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11831 return SE.getUnknown(Expr->getValue()); 11832 } 11833 11834 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 11835 return SE.getCouldNotCompute(); 11836 } 11837 }; 11838 11839 SCEVMapper SCM(SE2); 11840 11841 while (!LoopStack.empty()) { 11842 auto *L = LoopStack.pop_back_val(); 11843 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 11844 11845 auto *CurBECount = SCM.visit( 11846 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 11847 auto *NewBECount = SE2.getBackedgeTakenCount(L); 11848 11849 if (CurBECount == SE2.getCouldNotCompute() || 11850 NewBECount == SE2.getCouldNotCompute()) { 11851 // NB! This situation is legal, but is very suspicious -- whatever pass 11852 // change the loop to make a trip count go from could not compute to 11853 // computable or vice-versa *should have* invalidated SCEV. However, we 11854 // choose not to assert here (for now) since we don't want false 11855 // positives. 11856 continue; 11857 } 11858 11859 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 11860 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 11861 // not propagate undef aggressively). This means we can (and do) fail 11862 // verification in cases where a transform makes the trip count of a loop 11863 // go from "undef" to "undef+1" (say). The transform is fine, since in 11864 // both cases the loop iterates "undef" times, but SCEV thinks we 11865 // increased the trip count of the loop by 1 incorrectly. 11866 continue; 11867 } 11868 11869 if (SE.getTypeSizeInBits(CurBECount->getType()) > 11870 SE.getTypeSizeInBits(NewBECount->getType())) 11871 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 11872 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 11873 SE.getTypeSizeInBits(NewBECount->getType())) 11874 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 11875 11876 auto *ConstantDelta = 11877 dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount)); 11878 11879 if (ConstantDelta && ConstantDelta->getAPInt() != 0) { 11880 dbgs() << "Trip Count Changed!\n"; 11881 dbgs() << "Old: " << *CurBECount << "\n"; 11882 dbgs() << "New: " << *NewBECount << "\n"; 11883 dbgs() << "Delta: " << *ConstantDelta << "\n"; 11884 std::abort(); 11885 } 11886 } 11887 } 11888 11889 bool ScalarEvolution::invalidate( 11890 Function &F, const PreservedAnalyses &PA, 11891 FunctionAnalysisManager::Invalidator &Inv) { 11892 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 11893 // of its dependencies is invalidated. 
11894 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 11895 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 11896 Inv.invalidate<AssumptionAnalysis>(F, PA) || 11897 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 11898 Inv.invalidate<LoopAnalysis>(F, PA); 11899 } 11900 11901 AnalysisKey ScalarEvolutionAnalysis::Key; 11902 11903 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 11904 FunctionAnalysisManager &AM) { 11905 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 11906 AM.getResult<AssumptionAnalysis>(F), 11907 AM.getResult<DominatorTreeAnalysis>(F), 11908 AM.getResult<LoopAnalysis>(F)); 11909 } 11910 11911 PreservedAnalyses 11912 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 11913 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 11914 return PreservedAnalyses::all(); 11915 } 11916 11917 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 11918 "Scalar Evolution Analysis", false, true) 11919 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 11920 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 11921 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 11922 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 11923 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 11924 "Scalar Evolution Analysis", false, true) 11925 11926 char ScalarEvolutionWrapperPass::ID = 0; 11927 11928 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 11929 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 11930 } 11931 11932 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 11933 SE.reset(new ScalarEvolution( 11934 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 11935 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 11936 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 11937 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 11938 return false; 11939 } 11940 11941 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 11942 11943 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 11944 SE->print(OS); 11945 } 11946 11947 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 11948 if (!VerifySCEV) 11949 return; 11950 11951 SE->verify(); 11952 } 11953 11954 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 11955 AU.setPreservesAll(); 11956 AU.addRequiredTransitive<AssumptionCacheTracker>(); 11957 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 11958 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 11959 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 11960 } 11961 11962 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 11963 const SCEV *RHS) { 11964 FoldingSetNodeID ID; 11965 assert(LHS->getType() == RHS->getType() && 11966 "Type mismatch between LHS and RHS"); 11967 // Unique this node based on the arguments 11968 ID.AddInteger(SCEVPredicate::P_Equal); 11969 ID.AddPointer(LHS); 11970 ID.AddPointer(RHS); 11971 void *IP = nullptr; 11972 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11973 return S; 11974 SCEVEqualPredicate *Eq = new (SCEVAllocator) 11975 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 11976 UniquePreds.InsertNode(Eq, IP); 11977 return Eq; 11978 } 11979 11980 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 11981 const SCEVAddRecExpr *AR, 11982 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 11983 FoldingSetNodeID ID; 11984 // Unique this 
node based on the arguments 11985 ID.AddInteger(SCEVPredicate::P_Wrap); 11986 ID.AddPointer(AR); 11987 ID.AddInteger(AddedFlags); 11988 void *IP = nullptr; 11989 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11990 return S; 11991 auto *OF = new (SCEVAllocator) 11992 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 11993 UniquePreds.InsertNode(OF, IP); 11994 return OF; 11995 } 11996 11997 namespace { 11998 11999 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12000 public: 12001 12002 /// Rewrites \p S in the context of a loop L and the SCEV predication 12003 /// infrastructure. 12004 /// 12005 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12006 /// equivalences present in \p Pred. 12007 /// 12008 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12009 /// \p NewPreds such that the result will be an AddRecExpr. 12010 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12011 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12012 SCEVUnionPredicate *Pred) { 12013 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12014 return Rewriter.visit(S); 12015 } 12016 12017 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12018 if (Pred) { 12019 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12020 for (auto *Pred : ExprPreds) 12021 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12022 if (IPred->getLHS() == Expr) 12023 return IPred->getRHS(); 12024 } 12025 return convertToAddRecWithPreds(Expr); 12026 } 12027 12028 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12029 const SCEV *Operand = visit(Expr->getOperand()); 12030 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12031 if (AR && AR->getLoop() == L && AR->isAffine()) { 12032 // This couldn't be folded because the operand didn't have the nuw 12033 // flag. Add the nusw flag as an assumption that we could make. 12034 const SCEV *Step = AR->getStepRecurrence(SE); 12035 Type *Ty = Expr->getType(); 12036 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12037 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12038 SE.getSignExtendExpr(Step, Ty), L, 12039 AR->getNoWrapFlags()); 12040 } 12041 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12042 } 12043 12044 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12045 const SCEV *Operand = visit(Expr->getOperand()); 12046 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12047 if (AR && AR->getLoop() == L && AR->isAffine()) { 12048 // This couldn't be folded because the operand didn't have the nsw 12049 // flag. Add the nssw flag as an assumption that we could make. 12050 const SCEV *Step = AR->getStepRecurrence(SE); 12051 Type *Ty = Expr->getType(); 12052 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12053 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12054 SE.getSignExtendExpr(Step, Ty), L, 12055 AR->getNoWrapFlags()); 12056 } 12057 return SE.getSignExtendExpr(Operand, Expr->getType()); 12058 } 12059 12060 private: 12061 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12062 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12063 SCEVUnionPredicate *Pred) 12064 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12065 12066 bool addOverflowAssumption(const SCEVPredicate *P) { 12067 if (!NewPreds) { 12068 // Check if we've already made this assumption. 
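      // (With no set in which to record new predicates, we can only succeed
      // when an equivalent assumption is already implied by the given union
      // predicate.)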
private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
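// Illustrative sketch of the two entry points above (assumes `SE` is a
// ScalarEvolution instance and `S` the SCEV of a loop-variant value in loop
// `L`; not code that is exercised here):
//
//   SCEVUnionPredicate Preds;
//   const SCEV *Rewritten = SE.rewriteUsingPredicate(S, L, Preds);
//
//   SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
//   if (const SCEVAddRecExpr *AR =
//           SE.convertSCEVToAddRecWithPredicates(S, L, NewPreds)) {
//     // `AR` is valid only on executions where all of `NewPreds` hold.
//   }
//
// The first call only applies already-known predicates; the second is
// allowed to invent new ones in order to reach an AddRec form.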
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply
    // the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
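// Worked example for getImpliedFlags (illustrative only): for an AddRec such
// as {0,+,4}<nuw><nsw><%L>, the static <nsw> flag yields IncrementNSSW, and
// because the step (4) is a non-negative constant, the static <nuw> flag
// also yields IncrementNUSW. setNoOverflow and hasNoOverflow below rely on
// this to avoid emitting runtime predicates for facts that already hold
// statically.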
/// Union predicates don't get cached, so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
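// Illustrative sketch of the caching scheme above (assumes `PSE` is a
// PredicatedScalarEvolution for the loop, `V` a Value in it, and `P` a
// predicate; not code that is exercised here):
//
//   const SCEV *S1 = PSE.getSCEV(V);   // rewritten and cached
//   const SCEV *S2 = PSE.getSCEV(V);   // same generation: cache hit
//   PSE.addPredicate(*P);              // bumps the generation
//   const SCEV *S3 = PSE.getSCEV(V);   // stale entry, rewritten again
//
// Each RewriteMap entry carries the generation it was computed in, so adding
// a predicate invalidates previous rewrites lazily.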
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
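// Illustrative interplay of setNoOverflow/hasNoOverflow above (assumes `PSE`
// wraps a value `V` whose SCEV is an AddRec; not code that is exercised
// here):
//
//   if (!PSE.hasNoOverflow(V, SCEVWrapPredicate::IncrementNUSW))
//     PSE.setNoOverflow(V, SCEVWrapPredicate::IncrementNUSW);
//
// Flags already implied by the AddRec's static no-wrap flags are cleared
// first, so no runtime predicate is emitted for statically known facts.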
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. This is not always easy, as A and B can be folded
// (imagine A is X / 2 and B is 4: A / B then becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
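// Illustrative match (not exercised here): for values %a and %b, the
// expression %a - (%a /u %b) * %b reaches SCEV in a form along the lines of
//   (%a + (-1 * (%a /u %b) * %b))
// and matchURem sets LHS = %a and RHS = %b. When an operand is constant,
// SCEV folds it into the multiply, which is why the two-operand case also
// retries with each operand negated.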