//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps",
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs that point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
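  // (The primary getSCEVType() sort above places, for example, constants
  // before add recurrences and add recurrences before unknowns, following
  // their order in the SCEVTypes enumeration.)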
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are
    // used by one SCEV, so we can safely sort recs by loop header dominance.
    // We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following expression kinds, so their visit
  // functions are left with empty implementations.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
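/// For example, BC(It, 1) = It and BC(It, 2) = It * (It - 1) / 2, evaluated
/// modulo 2^W as explained in the body below.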
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
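  // For example, for K = 4: K! = 24 = 2^3 * 3, so T = 3 and OddFactorial
  // ends up as 3. (T starts at 1 below because the loop skips i = 2, which
  // contributes exactly one factor of two.)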
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
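// For example, with a Step whose signed range max is the constant 4 at bit
// width 8, the limit below is (-128 - 4) mod 2^8 = 124 with predicate SLT:
// any recurrence value strictly below 124 can be incremented by 4 without
// exceeding SINT_MAX = 127.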
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as follows:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
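// If a PreStart is found, the extended start below is built as ext(Step) +
// ext(PreStart); by the congruence argument above, this equals ext(Start).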
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1590 // Note that this serves two purposes: It filters out loops that are 1591 // simply not analyzable, and it covers the case where this code is 1592 // being called from within backedge-taken count analysis, such that 1593 // attempting to ask for the backedge-taken count would likely result 1594 // in infinite recursion. In the latter case, the analysis code will 1595 // cope with a conservative value, and it will take care to purge 1596 // that value once it has finished. 1597 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1598 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1599 // Manually compute the final value for AR, checking for 1600 // overflow. 1601 1602 // Check whether the backedge-taken count can be losslessly cast to 1603 // the addrec's type. The count is always unsigned. 1604 const SCEV *CastedMaxBECount = 1605 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1606 const SCEV *RecastedMaxBECount = 1607 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1608 if (MaxBECount == RecastedMaxBECount) { 1609 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1610 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1611 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1612 SCEV::FlagAnyWrap, Depth + 1); 1613 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1614 SCEV::FlagAnyWrap, 1615 Depth + 1), 1616 WideTy, Depth + 1); 1617 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1618 const SCEV *WideMaxBECount = 1619 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1620 const SCEV *OperandExtendedAdd = 1621 getAddExpr(WideStart, 1622 getMulExpr(WideMaxBECount, 1623 getZeroExtendExpr(Step, WideTy, Depth + 1), 1624 SCEV::FlagAnyWrap, Depth + 1), 1625 SCEV::FlagAnyWrap, Depth + 1); 1626 if (ZAdd == OperandExtendedAdd) { 1627 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1628 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1629 // Return the expression with the addrec on the outside. 1630 return getAddRecExpr( 1631 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1632 Depth + 1), 1633 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1634 AR->getNoWrapFlags()); 1635 } 1636 // Similar to above, only this time treat the step value as signed. 1637 // This covers loops that count down. 1638 OperandExtendedAdd = 1639 getAddExpr(WideStart, 1640 getMulExpr(WideMaxBECount, 1641 getSignExtendExpr(Step, WideTy, Depth + 1), 1642 SCEV::FlagAnyWrap, Depth + 1), 1643 SCEV::FlagAnyWrap, Depth + 1); 1644 if (ZAdd == OperandExtendedAdd) { 1645 // Cache knowledge of AR NW, which is propagated to this AddRec. 1646 // Negative step causes unsigned wrap, but it still can't self-wrap. 1647 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1648 // Return the expression with the addrec on the outside. 1649 return getAddRecExpr( 1650 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1651 Depth + 1), 1652 getSignExtendExpr(Step, Ty, Depth + 1), L, 1653 AR->getNoWrapFlags()); 1654 } 1655 } 1656 } 1657 1658 // Normally, in the cases we can prove no-overflow via a 1659 // backedge guarding condition, we can also compute a backedge 1660 // taken count for the loop. The exceptions are assumptions and 1661 // guards present in the loop -- SCEV is not great at exploiting 1662 // these to compute max backedge taken counts, but can still use 1663 // these to prove lack of overflow. Use this fact to avoid 1664 // doing extra work that may not pay off.
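// As an illustrative sketch: in a loop such as
//   for (unsigned i = 0; i != n; ++i) { ... }
// a backedge guard that keeps the pre-increment value of {0,+,1} below the
// point where adding the step could wrap is exactly the kind of fact the
// isLoopBackedgeGuardedByCond queries below consume to mark the addrec NUW.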
1665 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1666 !AC.assumptions().empty()) { 1667 // If the backedge is guarded by a comparison with the pre-inc 1668 // value the addrec is safe. Also, if the entry is guarded by 1669 // a comparison with the start value and the backedge is 1670 // guarded by a comparison with the post-inc value, the addrec 1671 // is safe. 1672 if (isKnownPositive(Step)) { 1673 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1674 getUnsignedRangeMax(Step)); 1675 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1676 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1677 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1678 AR->getPostIncExpr(*this), N))) { 1679 // Cache knowledge of AR NUW, which is propagated to this 1680 // AddRec. 1681 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1682 // Return the expression with the addrec on the outside. 1683 return getAddRecExpr( 1684 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1685 Depth + 1), 1686 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1687 AR->getNoWrapFlags()); 1688 } 1689 } else if (isKnownNegative(Step)) { 1690 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1691 getSignedRangeMin(Step)); 1692 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1693 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1694 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1695 AR->getPostIncExpr(*this), N))) { 1696 // Cache knowledge of AR NW, which is propagated to this 1697 // AddRec. Negative step causes unsigned wrap, but it 1698 // still can't self-wrap. 1699 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1700 // Return the expression with the addrec on the outside. 1701 return getAddRecExpr( 1702 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1703 Depth + 1), 1704 getSignExtendExpr(Step, Ty, Depth + 1), L, 1705 AR->getNoWrapFlags()); 1706 } 1707 } 1708 } 1709 1710 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1711 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1712 return getAddRecExpr( 1713 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1714 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1715 } 1716 } 1717 1718 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1719 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1720 if (SA->hasNoUnsignedWrap()) { 1721 // If the addition does not unsign overflow then we can, by definition, 1722 // commute the zero extension with the addition operation. 1723 SmallVector<const SCEV *, 4> Ops; 1724 for (const auto *Op : SA->operands()) 1725 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1726 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1727 } 1728 } 1729 1730 // The cast wasn't folded; create an explicit cast node. 1731 // Recompute the insert position, as it may have been invalidated. 
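// (The recursive simplification calls above may themselves have inserted
// new nodes into UniqueSCEVs, which can invalidate the insert position the
// earlier FindNodeOrInsertPos probe returned, hence the second probe below.)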
1732 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1733 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1734 Op, Ty); 1735 UniqueSCEVs.InsertNode(S, IP); 1736 return S; 1737 } 1738 1739 const SCEV * 1740 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1741 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1742 "This is not an extending conversion!"); 1743 assert(isSCEVable(Ty) && 1744 "This is not a conversion to a SCEVable type!"); 1745 Ty = getEffectiveSCEVType(Ty); 1746 1747 // Fold if the operand is constant. 1748 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1749 return getConstant( 1750 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1751 1752 // sext(sext(x)) --> sext(x) 1753 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1754 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1755 1756 // sext(zext(x)) --> zext(x) 1757 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1758 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1759 1760 // Before doing any expensive analysis, check to see if we've already 1761 // computed a SCEV for this Op and Ty. 1762 FoldingSetNodeID ID; 1763 ID.AddInteger(scSignExtend); 1764 ID.AddPointer(Op); 1765 ID.AddPointer(Ty); 1766 void *IP = nullptr; 1767 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1768 // Limit recursion depth. 1769 if (Depth > MaxExtDepth) { 1770 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1771 Op, Ty); 1772 UniqueSCEVs.InsertNode(S, IP); 1773 return S; 1774 } 1775 1776 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1777 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1778 // It's possible the bits taken off by the truncate were all sign bits. If 1779 // so, we should be able to simplify this further. 1780 const SCEV *X = ST->getOperand(); 1781 ConstantRange CR = getSignedRange(X); 1782 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1783 unsigned NewBits = getTypeSizeInBits(Ty); 1784 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1785 CR.sextOrTrunc(NewBits))) 1786 return getTruncateOrSignExtend(X, Ty); 1787 } 1788 1789 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1790 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1791 if (SA->getNumOperands() == 2) { 1792 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1793 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1794 if (SMul && SC1) { 1795 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1796 const APInt &C1 = SC1->getAPInt(); 1797 const APInt &C2 = SC2->getAPInt(); 1798 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1799 C2.ugt(C1) && C2.isPowerOf2()) 1800 return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1), 1801 getSignExtendExpr(SMul, Ty, Depth + 1), 1802 SCEV::FlagAnyWrap, Depth + 1); 1803 } 1804 } 1805 } 1806 1807 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1808 if (SA->hasNoSignedWrap()) { 1809 // If the addition does not sign overflow then we can, by definition, 1810 // commute the sign extension with the addition operation. 
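// For example, sext(i8 (a + b)<nsw>) to i32 becomes
// (sext(a) + sext(b))<nsw>, evaluated in i32.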
1811 SmallVector<const SCEV *, 4> Ops; 1812 for (const auto *Op : SA->operands()) 1813 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1814 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1815 } 1816 } 1817 // If the input value is a chrec scev, and we can prove that the value 1818 // did not overflow the old, smaller, value, we can sign extend all of the 1819 // operands (often constants). This allows analysis of something like 1820 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1821 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1822 if (AR->isAffine()) { 1823 const SCEV *Start = AR->getStart(); 1824 const SCEV *Step = AR->getStepRecurrence(*this); 1825 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1826 const Loop *L = AR->getLoop(); 1827 1828 if (!AR->hasNoSignedWrap()) { 1829 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1830 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1831 } 1832 1833 // If we have special knowledge that this addrec won't overflow, 1834 // we don't need to do any further analysis. 1835 if (AR->hasNoSignedWrap()) 1836 return getAddRecExpr( 1837 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1838 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 1839 1840 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1841 // Note that this serves two purposes: It filters out loops that are 1842 // simply not analyzable, and it covers the case where this code is 1843 // being called from within backedge-taken count analysis, such that 1844 // attempting to ask for the backedge-taken count would likely result 1845 // in infinite recursion. In the latter case, the analysis code will 1846 // cope with a conservative value, and it will take care to purge 1847 // that value once it has finished. 1848 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1849 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1850 // Manually compute the final value for AR, checking for 1851 // overflow. 1852 1853 // Check whether the backedge-taken count can be losslessly cast to 1854 // the addrec's type. The count is always unsigned. 1855 const SCEV *CastedMaxBECount = 1856 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1857 const SCEV *RecastedMaxBECount = 1858 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1859 if (MaxBECount == RecastedMaxBECount) { 1860 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1861 // Check whether Start+Step*MaxBECount has no signed overflow. 1862 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 1863 SCEV::FlagAnyWrap, Depth + 1); 1864 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 1865 SCEV::FlagAnyWrap, 1866 Depth + 1), 1867 WideTy, Depth + 1); 1868 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 1869 const SCEV *WideMaxBECount = 1870 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1871 const SCEV *OperandExtendedAdd = 1872 getAddExpr(WideStart, 1873 getMulExpr(WideMaxBECount, 1874 getSignExtendExpr(Step, WideTy, Depth + 1), 1875 SCEV::FlagAnyWrap, Depth + 1), 1876 SCEV::FlagAnyWrap, Depth + 1); 1877 if (SAdd == OperandExtendedAdd) { 1878 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1879 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1880 // Return the expression with the addrec on the outside.
1881 return getAddRecExpr( 1882 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1883 Depth + 1), 1884 getSignExtendExpr(Step, Ty, Depth + 1), L, 1885 AR->getNoWrapFlags()); 1886 } 1887 // Similar to above, only this time treat the step value as unsigned. 1888 // This covers loops that count up with an unsigned step. 1889 OperandExtendedAdd = 1890 getAddExpr(WideStart, 1891 getMulExpr(WideMaxBECount, 1892 getZeroExtendExpr(Step, WideTy, Depth + 1), 1893 SCEV::FlagAnyWrap, Depth + 1), 1894 SCEV::FlagAnyWrap, Depth + 1); 1895 if (SAdd == OperandExtendedAdd) { 1896 // If AR wraps around then 1897 // 1898 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1899 // => SAdd != OperandExtendedAdd 1900 // 1901 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1902 // (SAdd == OperandExtendedAdd => AR is NW) 1903 1904 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1905 1906 // Return the expression with the addrec on the outside. 1907 return getAddRecExpr( 1908 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1909 Depth + 1), 1910 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1911 AR->getNoWrapFlags()); 1912 } 1913 } 1914 } 1915 1916 // Normally, in the cases we can prove no-overflow via a 1917 // backedge guarding condition, we can also compute a backedge 1918 // taken count for the loop. The exceptions are assumptions and 1919 // guards present in the loop -- SCEV is not great at exploiting 1920 // these to compute max backedge taken counts, but can still use 1921 // these to prove lack of overflow. Use this fact to avoid 1922 // doing extra work that may not pay off. 1923 1924 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1925 !AC.assumptions().empty()) { 1926 // If the backedge is guarded by a comparison with the pre-inc 1927 // value the addrec is safe. Also, if the entry is guarded by 1928 // a comparison with the start value and the backedge is 1929 // guarded by a comparison with the post-inc value, the addrec 1930 // is safe. 1931 ICmpInst::Predicate Pred; 1932 const SCEV *OverflowLimit = 1933 getSignedOverflowLimitForStep(Step, &Pred, this); 1934 if (OverflowLimit && 1935 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1936 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1937 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1938 OverflowLimit)))) { 1939 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
1940 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1941 return getAddRecExpr( 1942 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1943 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1944 } 1945 } 1946 1947 // If Start and Step are constants, check if we can apply this 1948 // transformation: 1949 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2 1950 auto *SC1 = dyn_cast<SCEVConstant>(Start); 1951 auto *SC2 = dyn_cast<SCEVConstant>(Step); 1952 if (SC1 && SC2) { 1953 const APInt &C1 = SC1->getAPInt(); 1954 const APInt &C2 = SC2->getAPInt(); 1955 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && 1956 C2.isPowerOf2()) { 1957 Start = getSignExtendExpr(Start, Ty, Depth + 1); 1958 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L, 1959 AR->getNoWrapFlags()); 1960 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1), 1961 SCEV::FlagAnyWrap, Depth + 1); 1962 } 1963 } 1964 1965 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 1966 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1967 return getAddRecExpr( 1968 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1969 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1970 } 1971 } 1972 1973 // If the input value is provably positive and we could not simplify 1974 // away the sext, build a zext instead. 1975 if (isKnownNonNegative(Op)) 1976 return getZeroExtendExpr(Op, Ty, Depth + 1); 1977 1978 // The cast wasn't folded; create an explicit cast node. 1979 // Recompute the insert position, as it may have been invalidated. 1980 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1981 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1982 Op, Ty); 1983 UniqueSCEVs.InsertNode(S, IP); 1984 return S; 1985 } 1986 1987 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1988 /// unspecified bits out to the given type. 1989 /// 1990 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1991 Type *Ty) { 1992 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1993 "This is not an extending conversion!"); 1994 assert(isSCEVable(Ty) && 1995 "This is not a conversion to a SCEVable type!"); 1996 Ty = getEffectiveSCEVType(Ty); 1997 1998 // Sign-extend negative constants. 1999 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2000 if (SC->getAPInt().isNegative()) 2001 return getSignExtendExpr(Op, Ty); 2002 2003 // Peel off a truncate cast. 2004 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2005 const SCEV *NewOp = T->getOperand(); 2006 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2007 return getAnyExtendExpr(NewOp, Ty); 2008 return getTruncateOrNoop(NewOp, Ty); 2009 } 2010 2011 // Next try a zext cast. If the cast is folded, use it. 2012 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2013 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2014 return ZExt; 2015 2016 // Next try a sext cast. If the cast is folded, use it. 2017 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2018 if (!isa<SCEVSignExtendExpr>(SExt)) 2019 return SExt; 2020 2021 // Force the cast to be folded into the operands of an addrec.
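// anyext({S,+,X}) becomes {anyext(S),+,anyext(X)}<nw>; because the extended
// bits are unspecified, only no-self-wrap is claimed on the result.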
2022 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2023 SmallVector<const SCEV *, 4> Ops; 2024 for (const SCEV *Op : AR->operands()) 2025 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2026 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2027 } 2028 2029 // If the expression is obviously signed, use the sext cast value. 2030 if (isa<SCEVSMaxExpr>(Op)) 2031 return SExt; 2032 2033 // Absent any other information, use the zext cast value. 2034 return ZExt; 2035 } 2036 2037 /// Process the given Ops list, which is a list of operands to be added under 2038 /// the given scale, and update the given map. This is a helper function for 2039 /// getAddExpr. As an example of what it does, given a sequence of operands 2040 /// that would form an add expression like this: 2041 /// 2042 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2043 /// 2044 /// where A and B are constants, update the map with these values: 2045 /// 2046 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2047 /// 2048 /// and add 13 + A*B*29 to AccumulatedConstant. 2049 /// This will allow getAddExpr to produce this: 2050 /// 2051 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2052 /// 2053 /// This form often exposes folding opportunities that are hidden in 2054 /// the original operand list. 2055 /// 2056 /// Return true iff it appears that any interesting folding opportunities 2057 /// may be exposed. This helps getAddExpr short-circuit extra work in 2058 /// the common case where no interesting opportunities are present, and 2059 /// is also used as a check to avoid infinite recursion. 2060 /// 2061 static bool 2062 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2063 SmallVectorImpl<const SCEV *> &NewOps, 2064 APInt &AccumulatedConstant, 2065 const SCEV *const *Ops, size_t NumOperands, 2066 const APInt &Scale, 2067 ScalarEvolution &SE) { 2068 bool Interesting = false; 2069 2070 // Iterate over the add operands. They are sorted, with constants first. 2071 unsigned i = 0; 2072 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2073 ++i; 2074 // Pull a buried constant out to the outside. 2075 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2076 Interesting = true; 2077 AccumulatedConstant += Scale * C->getAPInt(); 2078 } 2079 2080 // Next comes everything else. We're especially interested in multiplies 2081 // here, but they're in the middle, so just visit the rest with one loop. 2082 for (; i != NumOperands; ++i) { 2083 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2084 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2085 APInt NewScale = 2086 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2087 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2088 // A multiplication of a constant with another add; recurse. 2089 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2090 Interesting |= 2091 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2092 Add->op_begin(), Add->getNumOperands(), 2093 NewScale, SE); 2094 } else { 2095 // A multiplication of a constant with some other value. Update 2096 // the map.
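// E.g., seeing 3 * x while the current Scale is A records the entry
// (x, 3*A) in the map.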
2097 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2098 const SCEV *Key = SE.getMulExpr(MulOps); 2099 auto Pair = M.insert({Key, NewScale}); 2100 if (Pair.second) { 2101 NewOps.push_back(Pair.first->first); 2102 } else { 2103 Pair.first->second += NewScale; 2104 // The map already had an entry for this value, which may indicate 2105 // a folding opportunity. 2106 Interesting = true; 2107 } 2108 } 2109 } else { 2110 // An ordinary operand. Update the map. 2111 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2112 M.insert({Ops[i], Scale}); 2113 if (Pair.second) { 2114 NewOps.push_back(Pair.first->first); 2115 } else { 2116 Pair.first->second += Scale; 2117 // The map already had an entry for this value, which may indicate 2118 // a folding opportunity. 2119 Interesting = true; 2120 } 2121 } 2122 } 2123 2124 return Interesting; 2125 } 2126 2127 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2128 // `Flags' as can't-wrap behavior. Infer a more aggressive set of 2129 // can't-overflow flags for the operation if possible. 2130 static SCEV::NoWrapFlags 2131 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2132 const SmallVectorImpl<const SCEV *> &Ops, 2133 SCEV::NoWrapFlags Flags) { 2134 using namespace std::placeholders; 2135 typedef OverflowingBinaryOperator OBO; 2136 2137 bool CanAnalyze = 2138 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2139 (void)CanAnalyze; 2140 assert(CanAnalyze && "don't call from other places!"); 2141 2142 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2143 SCEV::NoWrapFlags SignOrUnsignWrap = 2144 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2145 2146 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2147 auto IsKnownNonNegative = [&](const SCEV *S) { 2148 return SE->isKnownNonNegative(S); 2149 }; 2150 2151 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2152 Flags = 2153 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2154 2155 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2156 2157 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2158 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2159 2160 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2161 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2162 2163 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2164 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2165 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2166 Instruction::Add, C, OBO::NoSignedWrap); 2167 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2168 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2169 } 2170 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2171 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2172 Instruction::Add, C, OBO::NoUnsignedWrap); 2173 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2174 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2175 } 2176 } 2177 2178 return Flags; 2179 } 2180 2181 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2182 if (!isLoopInvariant(S, L)) 2183 return false; 2184 // If a value depends on a SCEVUnknown which is defined after the loop, we 2185 // conservatively assume that we cannot calculate it at the loop's entry.
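// A sketch of the problematic case: a SCEVUnknown wrapping an instruction
// that only executes after the loop (its block is dominated by the loop
// header) is formally loop-invariant, yet it has no value yet when the loop
// is entered, so an expression using it is not available there.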
2186 struct FindDominatedSCEVUnknown { 2187 bool Found = false; 2188 const Loop *L; 2189 DominatorTree &DT; 2190 LoopInfo &LI; 2191 2192 FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI) 2193 : L(L), DT(DT), LI(LI) {} 2194 2195 bool checkSCEVUnknown(const SCEVUnknown *SU) { 2196 if (auto *I = dyn_cast<Instruction>(SU->getValue())) { 2197 if (DT.dominates(L->getHeader(), I->getParent())) 2198 Found = true; 2199 else 2200 assert(DT.dominates(I->getParent(), L->getHeader()) && 2201 "No dominance relationship between SCEV and loop?"); 2202 } 2203 return false; 2204 } 2205 2206 bool follow(const SCEV *S) { 2207 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 2208 case scConstant: 2209 return false; 2210 case scAddRecExpr: 2211 case scTruncate: 2212 case scZeroExtend: 2213 case scSignExtend: 2214 case scAddExpr: 2215 case scMulExpr: 2216 case scUMaxExpr: 2217 case scSMaxExpr: 2218 case scUDivExpr: 2219 return true; 2220 case scUnknown: 2221 return checkSCEVUnknown(cast<SCEVUnknown>(S)); 2222 case scCouldNotCompute: 2223 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 2224 } 2225 return false; 2226 } 2227 2228 bool isDone() { return Found; } 2229 }; 2230 2231 FindDominatedSCEVUnknown FSU(L, DT, LI); 2232 SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU); 2233 ST.visitAll(S); 2234 return !FSU.Found; 2235 } 2236 2237 /// Get a canonical add expression, or something simpler if possible. 2238 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2239 SCEV::NoWrapFlags Flags, 2240 unsigned Depth) { 2241 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2242 "only nuw or nsw allowed"); 2243 assert(!Ops.empty() && "Cannot get empty add!"); 2244 if (Ops.size() == 1) return Ops[0]; 2245 #ifndef NDEBUG 2246 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2247 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2248 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2249 "SCEVAddExpr operand types don't match!"); 2250 #endif 2251 2252 // Sort by complexity; this groups all similar expression types together. 2253 GroupByComplexity(Ops, &LI, DT); 2254 2255 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2256 2257 // If there are any constants, fold them together. 2258 unsigned Idx = 0; 2259 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2260 ++Idx; 2261 assert(Idx < Ops.size()); 2262 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2263 // We found two constants, fold them together! 2264 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2265 if (Ops.size() == 2) return Ops[0]; 2266 Ops.erase(Ops.begin()+1); // Erase the folded element 2267 LHSC = cast<SCEVConstant>(Ops[0]); 2268 } 2269 2270 // If we are left with a constant zero being added, strip it off. 2271 if (LHSC->getValue()->isZero()) { 2272 Ops.erase(Ops.begin()); 2273 --Idx; 2274 } 2275 2276 if (Ops.size() == 1) return Ops[0]; 2277 } 2278 2279 // Limit recursion depth. 2280 if (Depth > MaxArithDepth) 2281 return getOrCreateAddExpr(Ops, Flags); 2282 2283 // Okay, check to see if the same value occurs in the operand list more than 2284 // once. If so, merge them together into a multiply expression. Since we 2285 // sorted the list, these values are required to be adjacent. 2286 Type *Ty = Ops[0]->getType(); 2287 bool FoundMatch = false; 2288 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2289 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2290 // Scan ahead to count how many equal operands there are.
2291 unsigned Count = 2; 2292 while (i+Count != e && Ops[i+Count] == Ops[i]) 2293 ++Count; 2294 // Merge the values into a multiply. 2295 const SCEV *Scale = getConstant(Ty, Count); 2296 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2297 if (Ops.size() == Count) 2298 return Mul; 2299 Ops[i] = Mul; 2300 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2301 --i; e -= Count - 1; 2302 FoundMatch = true; 2303 } 2304 if (FoundMatch) 2305 return getAddExpr(Ops, Flags); 2306 2307 // Check for truncates. If all the operands are truncated from the same 2308 // type, see if factoring out the truncate would permit the result to be 2309 // folded. E.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 2310 // if the contents of the resulting outer trunc fold to something simple. 2311 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 2312 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 2313 Type *DstType = Trunc->getType(); 2314 Type *SrcType = Trunc->getOperand()->getType(); 2315 SmallVector<const SCEV *, 8> LargeOps; 2316 bool Ok = true; 2317 // Check all the operands to see if they can be represented in the 2318 // source type of the truncate. 2319 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2320 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2321 if (T->getOperand()->getType() != SrcType) { 2322 Ok = false; 2323 break; 2324 } 2325 LargeOps.push_back(T->getOperand()); 2326 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2327 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2328 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2329 SmallVector<const SCEV *, 8> LargeMulOps; 2330 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2331 if (const SCEVTruncateExpr *T = 2332 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2333 if (T->getOperand()->getType() != SrcType) { 2334 Ok = false; 2335 break; 2336 } 2337 LargeMulOps.push_back(T->getOperand()); 2338 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2339 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2340 } else { 2341 Ok = false; 2342 break; 2343 } 2344 } 2345 if (Ok) 2346 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2347 } else { 2348 Ok = false; 2349 break; 2350 } 2351 } 2352 if (Ok) { 2353 // Evaluate the expression in the larger type. 2354 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); 2355 // If it folds to something simple, use it. Otherwise, don't. 2356 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2357 return getTruncateExpr(Fold, DstType); 2358 } 2359 } 2360 2361 // Skip past any other cast SCEVs. 2362 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2363 ++Idx; 2364 2365 // If there are add operands they would be next. 2366 if (Idx < Ops.size()) { 2367 bool DeletedAdd = false; 2368 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2369 if (Ops.size() > AddOpsInlineThreshold || 2370 Add->getNumOperands() > AddOpsInlineThreshold) 2371 break; 2372 // If we have an add, expand the add operands onto the end of the operands 2373 // list. 2374 Ops.erase(Ops.begin()+Idx); 2375 Ops.append(Add->op_begin(), Add->op_end()); 2376 DeletedAdd = true; 2377 } 2378 2379 // If we deleted at least one add, we added operands to the end of the list, 2380 // and they are not necessarily sorted. Recurse to resort and resimplify 2381 // any operands we just acquired.
2382 if (DeletedAdd) 2383 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2384 } 2385 2386 // Skip over the add expression until we get to a multiply. 2387 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2388 ++Idx; 2389 2390 // Check to see if there are any folding opportunities present with 2391 // operands multiplied by constant values. 2392 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2393 uint64_t BitWidth = getTypeSizeInBits(Ty); 2394 DenseMap<const SCEV *, APInt> M; 2395 SmallVector<const SCEV *, 8> NewOps; 2396 APInt AccumulatedConstant(BitWidth, 0); 2397 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2398 Ops.data(), Ops.size(), 2399 APInt(BitWidth, 1), *this)) { 2400 struct APIntCompare { 2401 bool operator()(const APInt &LHS, const APInt &RHS) const { 2402 return LHS.ult(RHS); 2403 } 2404 }; 2405 2406 // Some interesting folding opportunity is present, so it's worthwhile to 2407 // re-generate the operands list. Group the operands by constant scale, 2408 // to avoid multiplying by the same constant scale multiple times. 2409 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2410 for (const SCEV *NewOp : NewOps) 2411 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2412 // Re-generate the operands list. 2413 Ops.clear(); 2414 if (AccumulatedConstant != 0) 2415 Ops.push_back(getConstant(AccumulatedConstant)); 2416 for (auto &MulOp : MulOpLists) 2417 if (MulOp.first != 0) 2418 Ops.push_back(getMulExpr( 2419 getConstant(MulOp.first), 2420 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2421 SCEV::FlagAnyWrap, Depth + 1)); 2422 if (Ops.empty()) 2423 return getZero(Ty); 2424 if (Ops.size() == 1) 2425 return Ops[0]; 2426 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2427 } 2428 } 2429 2430 // If we are adding something to a multiply expression, make sure the 2431 // something is not already an operand of the multiply. If so, merge it into 2432 // the multiply. 2433 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2434 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2435 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2436 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2437 if (isa<SCEVConstant>(MulOpSCEV)) 2438 continue; 2439 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2440 if (MulOpSCEV == Ops[AddOp]) { 2441 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2442 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2443 if (Mul->getNumOperands() != 2) { 2444 // If the multiply has more than two operands, we must get the 2445 // Y*Z term. 2446 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2447 Mul->op_begin()+MulOp); 2448 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2449 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2450 } 2451 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2452 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2453 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2454 SCEV::FlagAnyWrap, Depth + 1); 2455 if (Ops.size() == 2) return OuterMul; 2456 if (AddOp < Idx) { 2457 Ops.erase(Ops.begin()+AddOp); 2458 Ops.erase(Ops.begin()+Idx-1); 2459 } else { 2460 Ops.erase(Ops.begin()+Idx); 2461 Ops.erase(Ops.begin()+AddOp-1); 2462 } 2463 Ops.push_back(OuterMul); 2464 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2465 } 2466 2467 // Check this multiply against other multiplies being added together.
2468 for (unsigned OtherMulIdx = Idx+1; 2469 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2470 ++OtherMulIdx) { 2471 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2472 // If MulOp occurs in OtherMul, we can fold the two multiplies 2473 // together. 2474 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2475 OMulOp != e; ++OMulOp) 2476 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2477 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2478 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2479 if (Mul->getNumOperands() != 2) { 2480 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2481 Mul->op_begin()+MulOp); 2482 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2483 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2484 } 2485 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2486 if (OtherMul->getNumOperands() != 2) { 2487 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2488 OtherMul->op_begin()+OMulOp); 2489 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2490 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2491 } 2492 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2493 const SCEV *InnerMulSum = 2494 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2495 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2496 SCEV::FlagAnyWrap, Depth + 1); 2497 if (Ops.size() == 2) return OuterMul; 2498 Ops.erase(Ops.begin()+Idx); 2499 Ops.erase(Ops.begin()+OtherMulIdx-1); 2500 Ops.push_back(OuterMul); 2501 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2502 } 2503 } 2504 } 2505 } 2506 2507 // If there are any add recurrences in the operands list, see if any other 2508 // added values are loop invariant. If so, we can fold them into the 2509 // recurrence. 2510 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2511 ++Idx; 2512 2513 // Scan over all recurrences, trying to fold loop invariants into them. 2514 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2515 // Scan all of the other operands to this add and add them to the vector if 2516 // they are loop invariant w.r.t. the recurrence. 2517 SmallVector<const SCEV *, 8> LIOps; 2518 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2519 const Loop *AddRecLoop = AddRec->getLoop(); 2520 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2521 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2522 LIOps.push_back(Ops[i]); 2523 Ops.erase(Ops.begin()+i); 2524 --i; --e; 2525 } 2526 2527 // If we found some loop invariants, fold them into the recurrence. 2528 if (!LIOps.empty()) { 2529 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2530 LIOps.push_back(AddRec->getStart()); 2531 2532 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2533 AddRec->op_end()); 2534 // This follows from the fact that the no-wrap flags on the outer add 2535 // expression are applicable on the 0th iteration, when the add recurrence 2536 // will be equal to its start value. 2537 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2538 2539 // Build the new addrec. Propagate the NUW and NSW flags if both the 2540 // outer add and the inner addrec are guaranteed to have no overflow. 2541 // Always propagate NW. 2542 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2543 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2544 2545 // If all of the other operands were loop invariant, we are done. 
2546 if (Ops.size() == 1) return NewRec; 2547 2548 // Otherwise, add the folded AddRec by the non-invariant parts. 2549 for (unsigned i = 0;; ++i) 2550 if (Ops[i] == AddRec) { 2551 Ops[i] = NewRec; 2552 break; 2553 } 2554 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2555 } 2556 2557 // Okay, if there weren't any loop invariants to be folded, check to see if 2558 // there are multiple AddRec's with the same loop induction variable being 2559 // added together. If so, we can fold them. 2560 for (unsigned OtherIdx = Idx+1; 2561 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2562 ++OtherIdx) { 2563 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2564 // so that the 1st found AddRecExpr is dominated by all others. 2565 assert(DT.dominates( 2566 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2567 AddRec->getLoop()->getHeader()) && 2568 "AddRecExprs are not sorted in reverse dominance order?"); 2569 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2570 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2571 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2572 AddRec->op_end()); 2573 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2574 ++OtherIdx) { 2575 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2576 if (OtherAddRec->getLoop() == AddRecLoop) { 2577 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2578 i != e; ++i) { 2579 if (i >= AddRecOps.size()) { 2580 AddRecOps.append(OtherAddRec->op_begin()+i, 2581 OtherAddRec->op_end()); 2582 break; 2583 } 2584 SmallVector<const SCEV *, 2> TwoOps = { 2585 AddRecOps[i], OtherAddRec->getOperand(i)}; 2586 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2587 } 2588 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2589 } 2590 } 2591 // Step size has changed, so we cannot guarantee no self-wraparound. 2592 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2593 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2594 } 2595 } 2596 2597 // Otherwise couldn't fold anything into this recurrence. Move onto the 2598 // next one. 2599 } 2600 2601 // Okay, it looks like we really DO need an add expr. Check to see if we 2602 // already have one, otherwise create a new one. 
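// (Note that getOrCreateAddExpr also re-applies the strengthened Flags to
// the node it finds or creates.)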
2603 return getOrCreateAddExpr(Ops, Flags); 2604 } 2605 2606 const SCEV * 2607 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2608 SCEV::NoWrapFlags Flags) { 2609 FoldingSetNodeID ID; 2610 ID.AddInteger(scAddExpr); 2611 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2612 ID.AddPointer(Ops[i]); 2613 void *IP = nullptr; 2614 SCEVAddExpr *S = 2615 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2616 if (!S) { 2617 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2618 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2619 S = new (SCEVAllocator) 2620 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2621 UniqueSCEVs.InsertNode(S, IP); 2622 } 2623 S->setNoWrapFlags(Flags); 2624 return S; 2625 } 2626 2627 const SCEV * 2628 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2629 SCEV::NoWrapFlags Flags) { 2630 FoldingSetNodeID ID; 2631 ID.AddInteger(scMulExpr); 2632 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2633 ID.AddPointer(Ops[i]); 2634 void *IP = nullptr; 2635 SCEVMulExpr *S = 2636 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2637 if (!S) { 2638 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2639 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2640 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2641 O, Ops.size()); 2642 UniqueSCEVs.InsertNode(S, IP); 2643 } 2644 S->setNoWrapFlags(Flags); 2645 return S; 2646 } 2647 2648 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2649 uint64_t k = i*j; 2650 if (j > 1 && k / j != i) Overflow = true; 2651 return k; 2652 } 2653 2654 /// Compute the result of "n choose k", the binomial coefficient. If an 2655 /// intermediate computation overflows, Overflow will be set and the return will 2656 /// be garbage. Overflow is not cleared on absence of overflow. 2657 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2658 // We use the multiplicative formula: 2659 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2660 // At each step i, we multiply the accumulator by the i-th term of the 2661 // numerator and divide by i, leaving it equal to choose(n, i). Each such 2662 // division produces an integral result, which helps reduce the chance of 2663 // overflow in the intermediate computations. However, we can still overflow 2664 // even when the final result would fit. 2665 2666 if (n == 0 || n == k) return 1; 2667 if (k > n) return 0; 2668 2669 if (k > n/2) 2670 k = n-k; 2671 2672 uint64_t r = 1; 2673 for (uint64_t i = 1; i <= k; ++i) { 2674 r = umul_ov(r, n-(i-1), Overflow); 2675 r /= i; 2676 } 2677 return r; 2678 } 2679 2680 /// Determine if any of the operands in this SCEV are a constant or if 2681 /// any of the add or multiply expressions in this SCEV contain a constant. 2682 static bool containsConstantSomewhere(const SCEV *StartExpr) { 2683 SmallVector<const SCEV *, 4> Ops; 2684 Ops.push_back(StartExpr); 2685 while (!Ops.empty()) { 2686 const SCEV *CurrentExpr = Ops.pop_back_val(); 2687 if (isa<SCEVConstant>(*CurrentExpr)) 2688 return true; 2689 2690 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) { 2691 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr); 2692 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); 2693 } 2694 } 2695 return false; 2696 } 2697 2698 /// Get a canonical multiply expression, or something simpler if possible.
2699 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2700 SCEV::NoWrapFlags Flags, 2701 unsigned Depth) { 2702 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2703 "only nuw or nsw allowed"); 2704 assert(!Ops.empty() && "Cannot get empty mul!"); 2705 if (Ops.size() == 1) return Ops[0]; 2706 #ifndef NDEBUG 2707 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2708 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2709 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2710 "SCEVMulExpr operand types don't match!"); 2711 #endif 2712 2713 // Sort by complexity; this groups all similar expression types together. 2714 GroupByComplexity(Ops, &LI, DT); 2715 2716 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2717 2718 // Limit recursion depth. 2719 if (Depth > MaxArithDepth) 2720 return getOrCreateMulExpr(Ops, Flags); 2721 2722 // If there are any constants, fold them together. 2723 unsigned Idx = 0; 2724 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2725 2726 // C1*(C2+V) -> C1*C2 + C1*V 2727 if (Ops.size() == 2) 2728 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2729 // If any of Add's ops are Adds or Muls with a constant, 2730 // apply this transformation as well. 2731 if (Add->getNumOperands() == 2) 2732 if (containsConstantSomewhere(Add)) 2733 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2734 SCEV::FlagAnyWrap, Depth + 1), 2735 getMulExpr(LHSC, Add->getOperand(1), 2736 SCEV::FlagAnyWrap, Depth + 1), 2737 SCEV::FlagAnyWrap, Depth + 1); 2738 2739 ++Idx; 2740 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2741 // We found two constants, fold them together! 2742 ConstantInt *Fold = 2743 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2744 Ops[0] = getConstant(Fold); 2745 Ops.erase(Ops.begin()+1); // Erase the folded element 2746 if (Ops.size() == 1) return Ops[0]; 2747 LHSC = cast<SCEVConstant>(Ops[0]); 2748 } 2749 2750 // If we are left with a constant one being multiplied, strip it off. 2751 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2752 Ops.erase(Ops.begin()); 2753 --Idx; 2754 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2755 // If we have a multiply of zero, it will always be zero. 2756 return Ops[0]; 2757 } else if (Ops[0]->isAllOnesValue()) { 2758 // If we have a mul by -1 of an add, try distributing the -1 among the 2759 // add operands. 2760 if (Ops.size() == 2) { 2761 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2762 SmallVector<const SCEV *, 4> NewOps; 2763 bool AnyFolded = false; 2764 for (const SCEV *AddOp : Add->operands()) { 2765 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2766 Depth + 1); 2767 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2768 NewOps.push_back(Mul); 2769 } 2770 if (AnyFolded) 2771 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2772 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2773 // Negation preserves a recurrence's no self-wrap property. 2774 SmallVector<const SCEV *, 4> Operands; 2775 for (const SCEV *AddRecOp : AddRec->operands()) 2776 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2777 Depth + 1)); 2778 2779 return getAddRecExpr(Operands, AddRec->getLoop(), 2780 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2781 } 2782 } 2783 } 2784 2785 if (Ops.size() == 1) 2786 return Ops[0]; 2787 } 2788 2789 // Skip over the add expression until we get to a multiply.
2790 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2791 ++Idx; 2792 2793 // If there are mul operands, inline them all into this expression. 2794 if (Idx < Ops.size()) { 2795 bool DeletedMul = false; 2796 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2797 if (Ops.size() > MulOpsInlineThreshold) 2798 break; 2799 // If we have a mul, expand the mul operands onto the end of the 2800 // operands list. 2801 Ops.erase(Ops.begin()+Idx); 2802 Ops.append(Mul->op_begin(), Mul->op_end()); 2803 DeletedMul = true; 2804 } 2805 2806 // If we deleted at least one mul, we added operands to the end of the 2807 // list, and they are not necessarily sorted. Recurse to resort and 2808 // resimplify any operands we just acquired. 2809 if (DeletedMul) 2810 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2811 } 2812 2813 // If there are any add recurrences in the operands list, see if any other 2814 // added values are loop invariant. If so, we can fold them into the 2815 // recurrence. 2816 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2817 ++Idx; 2818 2819 // Scan over all recurrences, trying to fold loop invariants into them. 2820 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2821 // Scan all of the other operands to this mul and add them to the vector 2822 // if they are loop invariant w.r.t. the recurrence. 2823 SmallVector<const SCEV *, 8> LIOps; 2824 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2825 const Loop *AddRecLoop = AddRec->getLoop(); 2826 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2827 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2828 LIOps.push_back(Ops[i]); 2829 Ops.erase(Ops.begin()+i); 2830 --i; --e; 2831 } 2832 2833 // If we found some loop invariants, fold them into the recurrence. 2834 if (!LIOps.empty()) { 2835 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2836 SmallVector<const SCEV *, 4> NewOps; 2837 NewOps.reserve(AddRec->getNumOperands()); 2838 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2839 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2840 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2841 SCEV::FlagAnyWrap, Depth + 1)); 2842 2843 // Build the new addrec. Propagate the NUW and NSW flags if both the 2844 // outer mul and the inner addrec are guaranteed to have no overflow. 2845 // 2846 // No self-wrap cannot be guaranteed after changing the step size, but 2847 // will be inferred if either NUW or NSW is true. 2848 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2849 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2850 2851 // If all of the other operands were loop invariant, we are done. 2852 if (Ops.size() == 1) return NewRec; 2853 2854 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2855 for (unsigned i = 0;; ++i) 2856 if (Ops[i] == AddRec) { 2857 Ops[i] = NewRec; 2858 break; 2859 } 2860 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2861 } 2862 2863 // Okay, if there weren't any loop invariants to be folded, check to see 2864 // if there are multiple AddRec's with the same loop induction variable 2865 // being multiplied together. If so, we can fold them. 2866 2867 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2868 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2869 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2870 // ]]],+,...up to x=2n}.
2871 // Note that the arguments to choose() are always integers with values 2872 // known at compile time, never SCEV objects. 2873 // 2874 // The implementation avoids pointless extra computations when the two 2875 // addrec's are of different length (mathematically, it's equivalent to 2876 // an infinite stream of zeros on the right). 2877 bool OpsModified = false; 2878 for (unsigned OtherIdx = Idx+1; 2879 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2880 ++OtherIdx) { 2881 const SCEVAddRecExpr *OtherAddRec = 2882 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2883 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2884 continue; 2885 2886 // Limit max number of arguments to avoid creation of unreasonably big 2887 // SCEVAddRecs with very complex operands. 2888 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 2889 MaxAddRecSize) 2890 continue; 2891 2892 bool Overflow = false; 2893 Type *Ty = AddRec->getType(); 2894 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2895 SmallVector<const SCEV*, 7> AddRecOps; 2896 for (int x = 0, xe = AddRec->getNumOperands() + 2897 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2898 const SCEV *Term = getZero(Ty); 2899 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2900 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2901 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2902 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2903 z < ze && !Overflow; ++z) { 2904 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2905 uint64_t Coeff; 2906 if (LargerThan64Bits) 2907 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2908 else 2909 Coeff = Coeff1*Coeff2; 2910 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2911 const SCEV *Term1 = AddRec->getOperand(y-z); 2912 const SCEV *Term2 = OtherAddRec->getOperand(z); 2913 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2, 2914 SCEV::FlagAnyWrap, Depth + 1), 2915 SCEV::FlagAnyWrap, Depth + 1); 2916 } 2917 } 2918 AddRecOps.push_back(Term); 2919 } 2920 if (!Overflow) { 2921 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), 2922 SCEV::FlagAnyWrap); 2923 if (Ops.size() == 2) return NewAddRec; 2924 Ops[Idx] = NewAddRec; 2925 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2926 OpsModified = true; 2927 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2928 if (!AddRec) 2929 break; 2930 } 2931 } 2932 if (OpsModified) 2933 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2934 2935 // Otherwise couldn't fold anything into this recurrence. Move onto the 2936 // next one. 2937 } 2938 2939 // Okay, it looks like we really DO need a mul expr. Check to see if we 2940 // already have one, otherwise create a new one. 2941 return getOrCreateMulExpr(Ops, Flags); 2942 } 2943 2944 /// Get a canonical unsigned division expression, or something simpler if 2945 /// possible. 2946 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 2947 const SCEV *RHS) { 2948 assert(getEffectiveSCEVType(LHS->getType()) == 2949 getEffectiveSCEVType(RHS->getType()) && 2950 "SCEVUDivExpr operand types don't match!"); 2951 2952 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2953 if (RHSC->getValue()->isOne()) 2954 return LHS; // X udiv 1 --> X 2955 // If the denominator is zero, the result of the udiv is undefined. Don't 2956 // try to analyze it, because the resolution chosen here may differ from 2957 // the resolution chosen in other parts of the compiler.
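// (Division by zero is undefined behavior in LLVM IR, so there is no single
// correct constant for such a fold to produce.)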
2958 if (!RHSC->getValue()->isZero()) { 2959 // Determine if the division can be folded into the operands of 2960 // its LHS. 2961 // TODO: Generalize this to non-constants by using known-bits information. 2962 Type *Ty = LHS->getType(); 2963 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 2964 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 2965 // For non-power-of-two values, effectively round the value up to the 2966 // nearest power of two. 2967 if (!RHSC->getAPInt().isPowerOf2()) 2968 ++MaxShiftAmt; 2969 IntegerType *ExtTy = 2970 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 2971 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 2972 if (const SCEVConstant *Step = 2973 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 2974 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 2975 const APInt &StepInt = Step->getAPInt(); 2976 const APInt &DivInt = RHSC->getAPInt(); 2977 if (!StepInt.urem(DivInt) && 2978 getZeroExtendExpr(AR, ExtTy) == 2979 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2980 getZeroExtendExpr(Step, ExtTy), 2981 AR->getLoop(), SCEV::FlagAnyWrap)) { 2982 SmallVector<const SCEV *, 4> Operands; 2983 for (const SCEV *Op : AR->operands()) 2984 Operands.push_back(getUDivExpr(Op, RHS)); 2985 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 2986 } 2987 // Get a canonical UDivExpr for a recurrence: 2988 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 2989 // We can currently only fold X%N if X is constant. 2990 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 2991 if (StartC && !DivInt.urem(StepInt) && 2992 getZeroExtendExpr(AR, ExtTy) == 2993 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2994 getZeroExtendExpr(Step, ExtTy), 2995 AR->getLoop(), SCEV::FlagAnyWrap)) { 2996 const APInt &StartInt = StartC->getAPInt(); 2997 const APInt &StartRem = StartInt.urem(StepInt); 2998 if (StartRem != 0) 2999 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3000 AR->getLoop(), SCEV::FlagNW); 3001 } 3002 } 3003 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3004 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3005 SmallVector<const SCEV *, 4> Operands; 3006 for (const SCEV *Op : M->operands()) 3007 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3008 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3009 // Find an operand that's safely divisible. 3010 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3011 const SCEV *Op = M->getOperand(i); 3012 const SCEV *Div = getUDivExpr(Op, RHSC); 3013 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3014 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3015 M->op_end()); 3016 Operands[i] = Div; 3017 return getMulExpr(Operands); 3018 } 3019 } 3020 } 3021 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
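// For instance (a sketch): (8*x + 16) /u 8 can become x + 2, provided the
// zero-extension comparison below shows the narrow add cannot wrap and each
// operand divides exactly.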
3022 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3023 SmallVector<const SCEV *, 4> Operands; 3024 for (const SCEV *Op : A->operands()) 3025 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3026 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3027 Operands.clear(); 3028 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3029 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3030 if (isa<SCEVUDivExpr>(Op) || 3031 getMulExpr(Op, RHS) != A->getOperand(i)) 3032 break; 3033 Operands.push_back(Op); 3034 } 3035 if (Operands.size() == A->getNumOperands()) 3036 return getAddExpr(Operands); 3037 } 3038 } 3039 3040 // Fold if both operands are constant. 3041 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3042 Constant *LHSCV = LHSC->getValue(); 3043 Constant *RHSCV = RHSC->getValue(); 3044 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3045 RHSCV))); 3046 } 3047 } 3048 } 3049 3050 FoldingSetNodeID ID; 3051 ID.AddInteger(scUDivExpr); 3052 ID.AddPointer(LHS); 3053 ID.AddPointer(RHS); 3054 void *IP = nullptr; 3055 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3056 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3057 LHS, RHS); 3058 UniqueSCEVs.InsertNode(S, IP); 3059 return S; 3060 } 3061 3062 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3063 APInt A = C1->getAPInt().abs(); 3064 APInt B = C2->getAPInt().abs(); 3065 uint32_t ABW = A.getBitWidth(); 3066 uint32_t BBW = B.getBitWidth(); 3067 3068 if (ABW > BBW) 3069 B = B.zext(ABW); 3070 else if (ABW < BBW) 3071 A = A.zext(BBW); 3072 3073 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3074 } 3075 3076 /// Get a canonical unsigned division expression, or something simpler if 3077 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3078 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3079 /// it's not exact because the udiv may be clearing bits. 3080 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3081 const SCEV *RHS) { 3082 // TODO: we could try to find factors in all sorts of things, but for now we 3083 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3084 // end of this file for inspiration. 3085 3086 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3087 if (!Mul || !Mul->hasNoUnsignedWrap()) 3088 return getUDivExpr(LHS, RHS); 3089 3090 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3091 // If the mulexpr multiplies by a constant, then that constant must be the 3092 // first element of the mulexpr. 3093 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3094 if (LHSCst == RHSCst) { 3095 SmallVector<const SCEV *, 2> Operands; 3096 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3097 return getMulExpr(Operands); 3098 } 3099 3100 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3101 // that there's a factor provided by one of the other terms. We need to 3102 // check. 
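      // For example, in (4 * %x)<nuw> /u 6 neither constant divides the
      // other, but they share a factor of 2, so the expression can be
      // reduced to (2 * %x) /u 3.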
3103       APInt Factor = gcd(LHSCst, RHSCst);
3104       if (!Factor.isIntN(1)) {
3105         LHSCst =
3106             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3107         RHSCst =
3108             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3109         SmallVector<const SCEV *, 2> Operands;
3110         Operands.push_back(LHSCst);
3111         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3112         LHS = getMulExpr(Operands);
3113         RHS = RHSCst;
3114         Mul = dyn_cast<SCEVMulExpr>(LHS);
3115         if (!Mul)
3116           return getUDivExactExpr(LHS, RHS);
3117       }
3118     }
3119   }
3120 
3121   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3122     if (Mul->getOperand(i) == RHS) {
3123       SmallVector<const SCEV *, 2> Operands;
3124       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3125       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3126       return getMulExpr(Operands);
3127     }
3128   }
3129 
3130   return getUDivExpr(LHS, RHS);
3131 }
3132 
3133 /// Get an add recurrence expression for the specified loop. Simplify the
3134 /// expression as much as possible.
3135 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3136                                            const Loop *L,
3137                                            SCEV::NoWrapFlags Flags) {
3138   SmallVector<const SCEV *, 4> Operands;
3139   Operands.push_back(Start);
3140   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3141     if (StepChrec->getLoop() == L) {
3142       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3143       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3144     }
3145 
3146   Operands.push_back(Step);
3147   return getAddRecExpr(Operands, L, Flags);
3148 }
3149 
3150 /// Get an add recurrence expression for the specified loop. Simplify the
3151 /// expression as much as possible.
3152 const SCEV *
3153 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3154                                const Loop *L, SCEV::NoWrapFlags Flags) {
3155   if (Operands.size() == 1) return Operands[0];
3156 #ifndef NDEBUG
3157   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3158   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3159     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3160            "SCEVAddRecExpr operand types don't match!");
3161   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3162     assert(isLoopInvariant(Operands[i], L) &&
3163            "SCEVAddRecExpr operand is not loop-invariant!");
3164 #endif
3165 
3166   if (Operands.back()->isZero()) {
3167     Operands.pop_back();
3168     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
3169   }
3170 
3171   // It's tempting to want to call getMaxBackedgeTakenCount here and
3172   // use that information to infer NUW and NSW flags. However, computing a
3173   // BE count requires calling getAddRecExpr, so we may not yet have a
3174   // meaningful BE count at this point (and if we don't, we'd be stuck
3175   // with a SCEVCouldNotCompute as the cached BE count).
3176 
3177   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3178 
3179   // Canonicalize nested AddRecs by nesting them in order of loop depth.
3180   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3181     const Loop *NestedLoop = NestedAR->getLoop();
3182     if (L->contains(NestedLoop)
3183             ?
(L->getLoopDepth() < NestedLoop->getLoopDepth()) 3184 : (!NestedLoop->contains(L) && 3185 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3186 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3187 NestedAR->op_end()); 3188 Operands[0] = NestedAR->getStart(); 3189 // AddRecs require their operands be loop-invariant with respect to their 3190 // loops. Don't perform this transformation if it would break this 3191 // requirement. 3192 bool AllInvariant = all_of( 3193 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3194 3195 if (AllInvariant) { 3196 // Create a recurrence for the outer loop with the same step size. 3197 // 3198 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3199 // inner recurrence has the same property. 3200 SCEV::NoWrapFlags OuterFlags = 3201 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3202 3203 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3204 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3205 return isLoopInvariant(Op, NestedLoop); 3206 }); 3207 3208 if (AllInvariant) { 3209 // Ok, both add recurrences are valid after the transformation. 3210 // 3211 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3212 // the outer recurrence has the same property. 3213 SCEV::NoWrapFlags InnerFlags = 3214 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3215 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3216 } 3217 } 3218 // Reset Operands to its original state. 3219 Operands[0] = NestedAR; 3220 } 3221 } 3222 3223 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3224 // already have one, otherwise create a new one. 3225 FoldingSetNodeID ID; 3226 ID.AddInteger(scAddRecExpr); 3227 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3228 ID.AddPointer(Operands[i]); 3229 ID.AddPointer(L); 3230 void *IP = nullptr; 3231 SCEVAddRecExpr *S = 3232 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3233 if (!S) { 3234 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3235 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3236 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3237 O, Operands.size(), L); 3238 UniqueSCEVs.InsertNode(S, IP); 3239 } 3240 S->setNoWrapFlags(Flags); 3241 return S; 3242 } 3243 3244 const SCEV * 3245 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3246 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3247 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3248 // getSCEV(Base)->getType() has the same address space as Base->getType() 3249 // because SCEV::getType() preserves the address space. 3250 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3251 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3252 // instruction to its SCEV, because the Instruction may be guarded by control 3253 // flow and the no-overflow bits may not be valid for the expression in any 3254 // context. This can be fixed similarly to how these flags are handled for 3255 // adds. 3256 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3257 : SCEV::FlagAnyWrap; 3258 3259 const SCEV *TotalOffset = getZero(IntPtrTy); 3260 // The array size is unimportant. The first thing we do on CurTy is getting 3261 // its element type. 
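  // For example, for 'getelementptr inbounds %struct.S, %struct.S* %p,
  // i64 %i, i32 1', the loop below accumulates
  //   TotalOffset = %i * sizeof(%struct.S) + offsetof(%struct.S, field 1)
  // and the result is the SCEV for (%p + TotalOffset).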
3262 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3263 for (const SCEV *IndexExpr : IndexExprs) { 3264 // Compute the (potentially symbolic) offset in bytes for this index. 3265 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3266 // For a struct, add the member offset. 3267 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3268 unsigned FieldNo = Index->getZExtValue(); 3269 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3270 3271 // Add the field offset to the running total offset. 3272 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3273 3274 // Update CurTy to the type of the field at Index. 3275 CurTy = STy->getTypeAtIndex(Index); 3276 } else { 3277 // Update CurTy to its element type. 3278 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3279 // For an array, add the element offset, explicitly scaled. 3280 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3281 // Getelementptr indices are signed. 3282 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3283 3284 // Multiply the index by the element size to compute the element offset. 3285 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3286 3287 // Add the element offset to the running total offset. 3288 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3289 } 3290 } 3291 3292 // Add the total offset from all the GEP indices to the base. 3293 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3294 } 3295 3296 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3297 const SCEV *RHS) { 3298 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3299 return getSMaxExpr(Ops); 3300 } 3301 3302 const SCEV * 3303 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3304 assert(!Ops.empty() && "Cannot get empty smax!"); 3305 if (Ops.size() == 1) return Ops[0]; 3306 #ifndef NDEBUG 3307 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3308 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3309 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3310 "SCEVSMaxExpr operand types don't match!"); 3311 #endif 3312 3313 // Sort by complexity, this groups all similar expression types together. 3314 GroupByComplexity(Ops, &LI, DT); 3315 3316 // If there are any constants, fold them together. 3317 unsigned Idx = 0; 3318 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3319 ++Idx; 3320 assert(Idx < Ops.size()); 3321 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3322 // We found two constants, fold them together! 3323 ConstantInt *Fold = ConstantInt::get( 3324 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3325 Ops[0] = getConstant(Fold); 3326 Ops.erase(Ops.begin()+1); // Erase the folded element 3327 if (Ops.size() == 1) return Ops[0]; 3328 LHSC = cast<SCEVConstant>(Ops[0]); 3329 } 3330 3331 // If we are left with a constant minimum-int, strip it off. 3332 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3333 Ops.erase(Ops.begin()); 3334 --Idx; 3335 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3336 // If we have an smax with a constant maximum-int, it will always be 3337 // maximum-int. 3338 return Ops[0]; 3339 } 3340 3341 if (Ops.size() == 1) return Ops[0]; 3342 } 3343 3344 // Find the first SMax 3345 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3346 ++Idx; 3347 3348 // Check to see if one of the operands is an SMax. If so, expand its operands 3349 // onto our operand list, and recurse to simplify. 
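  // For example, smax(%a, smax(%b, %c)) is flattened to smax(%a, %b, %c)
  // before any further simplification.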
3350 if (Idx < Ops.size()) { 3351 bool DeletedSMax = false; 3352 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3353 Ops.erase(Ops.begin()+Idx); 3354 Ops.append(SMax->op_begin(), SMax->op_end()); 3355 DeletedSMax = true; 3356 } 3357 3358 if (DeletedSMax) 3359 return getSMaxExpr(Ops); 3360 } 3361 3362 // Okay, check to see if the same value occurs in the operand list twice. If 3363 // so, delete one. Since we sorted the list, these values are required to 3364 // be adjacent. 3365 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3366 // X smax Y smax Y --> X smax Y 3367 // X smax Y --> X, if X is always greater than Y 3368 if (Ops[i] == Ops[i+1] || 3369 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3370 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3371 --i; --e; 3372 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3373 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3374 --i; --e; 3375 } 3376 3377 if (Ops.size() == 1) return Ops[0]; 3378 3379 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3380 3381 // Okay, it looks like we really DO need an smax expr. Check to see if we 3382 // already have one, otherwise create a new one. 3383 FoldingSetNodeID ID; 3384 ID.AddInteger(scSMaxExpr); 3385 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3386 ID.AddPointer(Ops[i]); 3387 void *IP = nullptr; 3388 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3389 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3390 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3391 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3392 O, Ops.size()); 3393 UniqueSCEVs.InsertNode(S, IP); 3394 return S; 3395 } 3396 3397 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3398 const SCEV *RHS) { 3399 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3400 return getUMaxExpr(Ops); 3401 } 3402 3403 const SCEV * 3404 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3405 assert(!Ops.empty() && "Cannot get empty umax!"); 3406 if (Ops.size() == 1) return Ops[0]; 3407 #ifndef NDEBUG 3408 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3409 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3410 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3411 "SCEVUMaxExpr operand types don't match!"); 3412 #endif 3413 3414 // Sort by complexity, this groups all similar expression types together. 3415 GroupByComplexity(Ops, &LI, DT); 3416 3417 // If there are any constants, fold them together. 3418 unsigned Idx = 0; 3419 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3420 ++Idx; 3421 assert(Idx < Ops.size()); 3422 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3423 // We found two constants, fold them together! 3424 ConstantInt *Fold = ConstantInt::get( 3425 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3426 Ops[0] = getConstant(Fold); 3427 Ops.erase(Ops.begin()+1); // Erase the folded element 3428 if (Ops.size() == 1) return Ops[0]; 3429 LHSC = cast<SCEVConstant>(Ops[0]); 3430 } 3431 3432 // If we are left with a constant minimum-int, strip it off. 3433 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3434 Ops.erase(Ops.begin()); 3435 --Idx; 3436 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3437 // If we have an umax with a constant maximum-int, it will always be 3438 // maximum-int. 
3439 return Ops[0]; 3440 } 3441 3442 if (Ops.size() == 1) return Ops[0]; 3443 } 3444 3445 // Find the first UMax 3446 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3447 ++Idx; 3448 3449 // Check to see if one of the operands is a UMax. If so, expand its operands 3450 // onto our operand list, and recurse to simplify. 3451 if (Idx < Ops.size()) { 3452 bool DeletedUMax = false; 3453 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3454 Ops.erase(Ops.begin()+Idx); 3455 Ops.append(UMax->op_begin(), UMax->op_end()); 3456 DeletedUMax = true; 3457 } 3458 3459 if (DeletedUMax) 3460 return getUMaxExpr(Ops); 3461 } 3462 3463 // Okay, check to see if the same value occurs in the operand list twice. If 3464 // so, delete one. Since we sorted the list, these values are required to 3465 // be adjacent. 3466 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3467 // X umax Y umax Y --> X umax Y 3468 // X umax Y --> X, if X is always greater than Y 3469 if (Ops[i] == Ops[i+1] || 3470 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 3471 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3472 --i; --e; 3473 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 3474 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3475 --i; --e; 3476 } 3477 3478 if (Ops.size() == 1) return Ops[0]; 3479 3480 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3481 3482 // Okay, it looks like we really DO need a umax expr. Check to see if we 3483 // already have one, otherwise create a new one. 3484 FoldingSetNodeID ID; 3485 ID.AddInteger(scUMaxExpr); 3486 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3487 ID.AddPointer(Ops[i]); 3488 void *IP = nullptr; 3489 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3490 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3491 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3492 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3493 O, Ops.size()); 3494 UniqueSCEVs.InsertNode(S, IP); 3495 return S; 3496 } 3497 3498 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3499 const SCEV *RHS) { 3500 // ~smax(~x, ~y) == smin(x, y). 3501 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3502 } 3503 3504 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3505 const SCEV *RHS) { 3506 // ~umax(~x, ~y) == umin(x, y) 3507 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3508 } 3509 3510 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3511 // We can bypass creating a target-independent 3512 // constant expression and then folding it back into a ConstantInt. 3513 // This is just a compile-time optimization. 3514 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3515 } 3516 3517 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3518 StructType *STy, 3519 unsigned FieldNo) { 3520 // We can bypass creating a target-independent 3521 // constant expression and then folding it back into a ConstantInt. 3522 // This is just a compile-time optimization. 3523 return getConstant( 3524 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3525 } 3526 3527 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3528 // Don't attempt to do anything other than create a SCEVUnknown object 3529 // here. 
createSCEV only calls getUnknown after checking for all other
3530   // interesting possibilities, and any other code that calls getUnknown
3531   // is doing so in order to hide a value from SCEV canonicalization.
3532 
3533   FoldingSetNodeID ID;
3534   ID.AddInteger(scUnknown);
3535   ID.AddPointer(V);
3536   void *IP = nullptr;
3537   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3538     assert(cast<SCEVUnknown>(S)->getValue() == V &&
3539            "Stale SCEVUnknown in uniquing map!");
3540     return S;
3541   }
3542   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3543                                             FirstUnknown);
3544   FirstUnknown = cast<SCEVUnknown>(S);
3545   UniqueSCEVs.InsertNode(S, IP);
3546   return S;
3547 }
3548 
3549 //===----------------------------------------------------------------------===//
3550 //            Basic SCEV Analysis and PHI Idiom Recognition Code
3551 //
3552 
3553 /// Test if values of the given type are analyzable within the SCEV
3554 /// framework. This primarily includes integer types, and it can optionally
3555 /// include pointer types if the ScalarEvolution class has access to
3556 /// target-specific information.
3557 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3558   // Integers and pointers are always SCEVable.
3559   return Ty->isIntegerTy() || Ty->isPointerTy();
3560 }
3561 
3562 /// Return the size in bits of the specified type, for which isSCEVable must
3563 /// return true.
3564 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3565   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3566   return getDataLayout().getTypeSizeInBits(Ty);
3567 }
3568 
3569 /// Return a type with the same bitwidth as the given type and which represents
3570 /// how SCEV will treat the given type, for which isSCEVable must return
3571 /// true. For pointer types, this is the pointer-sized integer type.
3572 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3573   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3574 
3575   if (Ty->isIntegerTy())
3576     return Ty;
3577 
3578   // The only other supported type is pointer.
3579   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3580   return getDataLayout().getIntPtrType(Ty);
3581 }
3582 
3583 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3584   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3585 }
3586 
3587 const SCEV *ScalarEvolution::getCouldNotCompute() {
3588   return CouldNotCompute.get();
3589 }
3590 
3591 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3592   bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3593     auto *SU = dyn_cast<SCEVUnknown>(S);
3594     return SU && SU->getValue() == nullptr;
3595   });
3596 
3597   return !ContainsNulls;
3598 }
3599 
3600 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3601   HasRecMapType::iterator I = HasRecMap.find(S);
3602   if (I != HasRecMap.end())
3603     return I->second;
3604 
3605   bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
3606   HasRecMap.insert({S, FoundAddRec});
3607   return FoundAddRec;
3608 }
3609 
3610 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3611 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3612 /// offset I, then return {S', I}, else return {\p S, nullptr}.
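/// For example, (42 + %a) yields {%a, 42}, while (%a + %b), which has no
/// constant operand, yields {(%a + %b), nullptr}.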
3613 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3614   const auto *Add = dyn_cast<SCEVAddExpr>(S);
3615   if (!Add)
3616     return {S, nullptr};
3617 
3618   if (Add->getNumOperands() != 2)
3619     return {S, nullptr};
3620 
3621   auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3622   if (!ConstOp)
3623     return {S, nullptr};
3624 
3625   return {Add->getOperand(1), ConstOp->getValue()};
3626 }
3627 
3628 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3629 /// by the value and offset from any ValueOffsetPair in the set.
3630 SetVector<ScalarEvolution::ValueOffsetPair> *
3631 ScalarEvolution::getSCEVValues(const SCEV *S) {
3632   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3633   if (SI == ExprValueMap.end())
3634     return nullptr;
3635 #ifndef NDEBUG
3636   if (VerifySCEVMap) {
3637     // Check there is no dangling Value in the set returned.
3638     for (const auto &VE : SI->second)
3639       assert(ValueExprMap.count(VE.first));
3640   }
3641 #endif
3642   return &SI->second;
3643 }
3644 
3645 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3646 /// cannot be used separately. eraseValueFromMap should be used to remove
3647 /// V from ValueExprMap and ExprValueMap at the same time.
3648 void ScalarEvolution::eraseValueFromMap(Value *V) {
3649   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3650   if (I != ValueExprMap.end()) {
3651     const SCEV *S = I->second;
3652     // Remove {V, 0} from the set of ExprValueMap[S]
3653     if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3654       SV->remove({V, nullptr});
3655 
3656     // Remove {V, Offset} from the set of ExprValueMap[Stripped]
3657     const SCEV *Stripped;
3658     ConstantInt *Offset;
3659     std::tie(Stripped, Offset) = splitAddExpr(S);
3660     if (Offset != nullptr) {
3661       if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3662         SV->remove({V, Offset});
3663     }
3664     ValueExprMap.erase(V);
3665   }
3666 }
3667 
3668 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3669 /// create a new one.
3670 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3671   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3672 
3673   const SCEV *S = getExistingSCEV(V);
3674   if (S == nullptr) {
3675     S = createSCEV(V);
3676     // During PHI resolution, it is possible to create two SCEVs for the same
3677     // V, so we need to double-check that V->S is inserted into ValueExprMap
3678     // before inserting S->{V, 0} into ExprValueMap.
3679     std::pair<ValueExprMapType::iterator, bool> Pair =
3680         ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3681     if (Pair.second) {
3682       ExprValueMap[S].insert({V, nullptr});
3683 
3684       // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3685       // ExprValueMap.
3686       const SCEV *Stripped = S;
3687       ConstantInt *Offset = nullptr;
3688       std::tie(Stripped, Offset) = splitAddExpr(S);
3689       // If Stripped is a SCEVUnknown, don't bother to save
3690       // Stripped -> {V, offset}. It doesn't simplify and sometimes even
3691       // increases the complexity of the expansion code.
3692       // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
3693       // because it may generate add/sub instead of GEP in SCEV expansion.
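      // For example, if %v computes (4 + %a), then (4 + %a) -> {%v, nullptr}
      // was recorded above and %a -> {%v, 4} is recorded here, letting the
      // expander rematerialize %a as (%v - 4) instead of emitting new IR.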
3694 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3695 !isa<GetElementPtrInst>(V)) 3696 ExprValueMap[Stripped].insert({V, Offset}); 3697 } 3698 } 3699 return S; 3700 } 3701 3702 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3703 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3704 3705 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3706 if (I != ValueExprMap.end()) { 3707 const SCEV *S = I->second; 3708 if (checkValidity(S)) 3709 return S; 3710 eraseValueFromMap(V); 3711 forgetMemoizedResults(S); 3712 } 3713 return nullptr; 3714 } 3715 3716 /// Return a SCEV corresponding to -V = -1*V 3717 /// 3718 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3719 SCEV::NoWrapFlags Flags) { 3720 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3721 return getConstant( 3722 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3723 3724 Type *Ty = V->getType(); 3725 Ty = getEffectiveSCEVType(Ty); 3726 return getMulExpr( 3727 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3728 } 3729 3730 /// Return a SCEV corresponding to ~V = -1-V 3731 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3732 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3733 return getConstant( 3734 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3735 3736 Type *Ty = V->getType(); 3737 Ty = getEffectiveSCEVType(Ty); 3738 const SCEV *AllOnes = 3739 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3740 return getMinusSCEV(AllOnes, V); 3741 } 3742 3743 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3744 SCEV::NoWrapFlags Flags, 3745 unsigned Depth) { 3746 // Fast path: X - X --> 0. 3747 if (LHS == RHS) 3748 return getZero(LHS->getType()); 3749 3750 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3751 // makes it so that we cannot make much use of NUW. 3752 auto AddFlags = SCEV::FlagAnyWrap; 3753 const bool RHSIsNotMinSigned = 3754 !getSignedRangeMin(RHS).isMinSignedValue(); 3755 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3756 // Let M be the minimum representable signed value. Then (-1)*RHS 3757 // signed-wraps if and only if RHS is M. That can happen even for 3758 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3759 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3760 // (-1)*RHS, we need to prove that RHS != M. 3761 // 3762 // If LHS is non-negative and we know that LHS - RHS does not 3763 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3764 // either by proving that RHS > M or that LHS >= 0. 3765 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3766 AddFlags = SCEV::FlagNSW; 3767 } 3768 } 3769 3770 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3771 // RHS is NSW and LHS >= 0. 3772 // 3773 // The difficulty here is that the NSW flag may have been proven 3774 // relative to a loop that is to be found in a recurrence in LHS and 3775 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3776 // larger scope than intended. 3777 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
3778 
3779   return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
3780 }
3781 
3782 const SCEV *
3783 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
3784   Type *SrcTy = V->getType();
3785   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3786          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3787          "Cannot truncate or zero extend with non-integer arguments!");
3788   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3789     return V;  // No conversion
3790   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3791     return getTruncateExpr(V, Ty);
3792   return getZeroExtendExpr(V, Ty);
3793 }
3794 
3795 const SCEV *
3796 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
3797                                          Type *Ty) {
3798   Type *SrcTy = V->getType();
3799   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3800          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3801          "Cannot truncate or sign extend with non-integer arguments!");
3802   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3803     return V;  // No conversion
3804   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3805     return getTruncateExpr(V, Ty);
3806   return getSignExtendExpr(V, Ty);
3807 }
3808 
3809 const SCEV *
3810 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
3811   Type *SrcTy = V->getType();
3812   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3813          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3814          "Cannot noop or zero extend with non-integer arguments!");
3815   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3816          "getNoopOrZeroExtend cannot truncate!");
3817   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3818     return V;  // No conversion
3819   return getZeroExtendExpr(V, Ty);
3820 }
3821 
3822 const SCEV *
3823 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
3824   Type *SrcTy = V->getType();
3825   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3826          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3827          "Cannot noop or sign extend with non-integer arguments!");
3828   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3829          "getNoopOrSignExtend cannot truncate!");
3830   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3831     return V;  // No conversion
3832   return getSignExtendExpr(V, Ty);
3833 }
3834 
3835 const SCEV *
3836 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
3837   Type *SrcTy = V->getType();
3838   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3839          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3840          "Cannot noop or any extend with non-integer arguments!");
3841   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3842          "getNoopOrAnyExtend cannot truncate!");
3843   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3844     return V;  // No conversion
3845   return getAnyExtendExpr(V, Ty);
3846 }
3847 
3848 const SCEV *
3849 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
3850   Type *SrcTy = V->getType();
3851   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3852          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3853          "Cannot truncate or noop with non-integer arguments!");
3854   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
3855          "getTruncateOrNoop cannot extend!");
3856   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3857     return V;  // No conversion
3858   return getTruncateExpr(V, Ty);
3859 }
3860 
3861 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
3862                                                         const SCEV *RHS) {
3863   const SCEV *PromotedLHS = LHS;
3864   const SCEV *PromotedRHS = RHS;
3865 
3866   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3867     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3868   else
3869     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3870 
3871   return getUMaxExpr(PromotedLHS, PromotedRHS);
3872 }
3873 
3874 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
3875                                                         const SCEV *RHS) {
3876   const SCEV *PromotedLHS = LHS;
3877   const SCEV *PromotedRHS = RHS;
3878 
3879   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3880     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3881   else
3882     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3883 
3884   return getUMinExpr(PromotedLHS, PromotedRHS);
3885 }
3886 
3887 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
3888   // A pointer operand may evaluate to a nonpointer expression, such as null.
3889   if (!V->getType()->isPointerTy())
3890     return V;
3891 
3892   if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
3893     return getPointerBase(Cast->getOperand());
3894   } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
3895     const SCEV *PtrOp = nullptr;
3896     for (const SCEV *NAryOp : NAry->operands()) {
3897       if (NAryOp->getType()->isPointerTy()) {
3898         // Cannot find the base of an expression with multiple pointer operands.
3899         if (PtrOp)
3900           return V;
3901         PtrOp = NAryOp;
3902       }
3903     }
3904     if (!PtrOp)
3905       return V;
3906     return getPointerBase(PtrOp);
3907   }
3908   return V;
3909 }
3910 
3911 /// Push users of the given Instruction onto the given Worklist.
3912 static void
3913 PushDefUseChildren(Instruction *I,
3914                    SmallVectorImpl<Instruction *> &Worklist) {
3915   // Push the def-use children onto the Worklist stack.
3916   for (User *U : I->users())
3917     Worklist.push_back(cast<Instruction>(U));
3918 }
3919 
3920 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
3921   SmallVector<Instruction *, 16> Worklist;
3922   PushDefUseChildren(PN, Worklist);
3923 
3924   SmallPtrSet<Instruction *, 8> Visited;
3925   Visited.insert(PN);
3926   while (!Worklist.empty()) {
3927     Instruction *I = Worklist.pop_back_val();
3928     if (!Visited.insert(I).second)
3929       continue;
3930 
3931     auto It = ValueExprMap.find_as(static_cast<Value *>(I));
3932     if (It != ValueExprMap.end()) {
3933       const SCEV *Old = It->second;
3934 
3935       // Short-circuit the def-use traversal if the symbolic name
3936       // ceases to appear in expressions.
3937       if (Old != SymName && !hasOperand(Old, SymName))
3938         continue;
3939 
3940       // SCEVUnknown for a PHI either means that it has an unrecognized
3941       // structure, it's a PHI that's in the process of being computed
3942       // by createNodeForPHI, or it's a single-value PHI. In the first case,
3943       // additional loop trip count information isn't going to change anything.
3944       // In the second case, createNodeForPHI will perform the necessary
3945       // updates on its own when it gets to that point. In the third, we do
3946       // want to forget the SCEVUnknown.
3947 if (!isa<PHINode>(I) || 3948 !isa<SCEVUnknown>(Old) || 3949 (I != PN && Old == SymName)) { 3950 eraseValueFromMap(It->first); 3951 forgetMemoizedResults(Old); 3952 } 3953 } 3954 3955 PushDefUseChildren(I, Worklist); 3956 } 3957 } 3958 3959 namespace { 3960 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 3961 public: 3962 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3963 ScalarEvolution &SE) { 3964 SCEVInitRewriter Rewriter(L, SE); 3965 const SCEV *Result = Rewriter.visit(S); 3966 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 3967 } 3968 3969 SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 3970 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 3971 3972 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 3973 if (!SE.isLoopInvariant(Expr, L)) 3974 Valid = false; 3975 return Expr; 3976 } 3977 3978 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 3979 // Only allow AddRecExprs for this loop. 3980 if (Expr->getLoop() == L) 3981 return Expr->getStart(); 3982 Valid = false; 3983 return Expr; 3984 } 3985 3986 bool isValid() { return Valid; } 3987 3988 private: 3989 const Loop *L; 3990 bool Valid; 3991 }; 3992 3993 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 3994 public: 3995 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3996 ScalarEvolution &SE) { 3997 SCEVShiftRewriter Rewriter(L, SE); 3998 const SCEV *Result = Rewriter.visit(S); 3999 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4000 } 4001 4002 SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4003 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 4004 4005 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4006 // Only allow AddRecExprs for this loop. 4007 if (!SE.isLoopInvariant(Expr, L)) 4008 Valid = false; 4009 return Expr; 4010 } 4011 4012 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4013 if (Expr->getLoop() == L && Expr->isAffine()) 4014 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4015 Valid = false; 4016 return Expr; 4017 } 4018 bool isValid() { return Valid; } 4019 4020 private: 4021 const Loop *L; 4022 bool Valid; 4023 }; 4024 } // end anonymous namespace 4025 4026 SCEV::NoWrapFlags 4027 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4028 if (!AR->isAffine()) 4029 return SCEV::FlagAnyWrap; 4030 4031 typedef OverflowingBinaryOperator OBO; 4032 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4033 4034 if (!AR->hasNoSignedWrap()) { 4035 ConstantRange AddRecRange = getSignedRange(AR); 4036 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4037 4038 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4039 Instruction::Add, IncRange, OBO::NoSignedWrap); 4040 if (NSWRegion.contains(AddRecRange)) 4041 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4042 } 4043 4044 if (!AR->hasNoUnsignedWrap()) { 4045 ConstantRange AddRecRange = getUnsignedRange(AR); 4046 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4047 4048 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4049 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4050 if (NUWRegion.contains(AddRecRange)) 4051 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4052 } 4053 4054 return Result; 4055 } 4056 4057 namespace { 4058 /// Represents an abstract binary operation. This may exist as a 4059 /// normal instruction or constant expression, or may have been 4060 /// derived from an expression tree. 
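/// For example, MatchBinaryOp below maps both a plain 'add' instruction and
/// an 'xor X, signmask' (which is an add in disguise) to this one form.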
4061 struct BinaryOp {
4062   unsigned Opcode;
4063   Value *LHS;
4064   Value *RHS;
4065   bool IsNSW;
4066   bool IsNUW;
4067 
4068   /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4069   /// constant expression.
4070   Operator *Op;
4071 
4072   explicit BinaryOp(Operator *Op)
4073       : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4074         IsNSW(false), IsNUW(false), Op(Op) {
4075     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4076       IsNSW = OBO->hasNoSignedWrap();
4077       IsNUW = OBO->hasNoUnsignedWrap();
4078     }
4079   }
4080 
4081   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4082                     bool IsNUW = false)
4083       : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
4084         Op(nullptr) {}
4085 };
4086 }
4087 
4088 
4089 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4090 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4091   auto *Op = dyn_cast<Operator>(V);
4092   if (!Op)
4093     return None;
4094 
4095   // Implementation detail: all the cleverness here should happen without
4096   // creating new SCEV expressions -- our caller knows tricks to avoid creating
4097   // SCEV expressions when possible, and we should not break that.
4098 
4099   switch (Op->getOpcode()) {
4100   case Instruction::Add:
4101   case Instruction::Sub:
4102   case Instruction::Mul:
4103   case Instruction::UDiv:
4104   case Instruction::And:
4105   case Instruction::Or:
4106   case Instruction::AShr:
4107   case Instruction::Shl:
4108     return BinaryOp(Op);
4109 
4110   case Instruction::Xor:
4111     if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4112       // If the RHS of the xor is a signmask, then this is just an add.
4113       // Instcombine turns add of signmask into xor as a strength reduction step.
4114       if (RHSC->getValue().isSignMask())
4115         return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4116     return BinaryOp(Op);
4117 
4118   case Instruction::LShr:
4119     // Turn logical shift right of a constant into an unsigned divide.
4120     if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4121       uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4122 
4123       // If the shift count is not less than the bitwidth, the result of
4124       // the shift is undefined. Don't try to analyze it, because the
4125       // resolution chosen here may differ from the resolution chosen in
4126       // other parts of the compiler.
4127       if (SA->getValue().ult(BitWidth)) {
4128         Constant *X =
4129             ConstantInt::get(SA->getContext(),
4130                              APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4131         return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4132       }
4133     }
4134     return BinaryOp(Op);
4135 
4136   case Instruction::ExtractValue: {
4137     auto *EVI = cast<ExtractValueInst>(Op);
4138     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4139       break;
4140 
4141     auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
4142     if (!CI)
4143       break;
4144 
4145     if (auto *F = CI->getCalledFunction())
4146       switch (F->getIntrinsicID()) {
4147       case Intrinsic::sadd_with_overflow:
4148       case Intrinsic::uadd_with_overflow: {
4149         if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
4150           return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4151                           CI->getArgOperand(1));
4152 
4153         // Now that we know that all uses of the arithmetic-result component of
4154         // CI are guarded by the overflow check, we can go ahead and pretend
4155         // that the arithmetic is non-overflowing.
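        // For example, if the result of sadd.with.overflow is only used where
        // its overflow bit is known false, the arithmetic behaves exactly like
        // a no-signed-wrap add and is modeled as such below.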
4156 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 4157 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4158 CI->getArgOperand(1), /* IsNSW = */ true, 4159 /* IsNUW = */ false); 4160 else 4161 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4162 CI->getArgOperand(1), /* IsNSW = */ false, 4163 /* IsNUW*/ true); 4164 } 4165 4166 case Intrinsic::ssub_with_overflow: 4167 case Intrinsic::usub_with_overflow: 4168 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4169 CI->getArgOperand(1)); 4170 4171 case Intrinsic::smul_with_overflow: 4172 case Intrinsic::umul_with_overflow: 4173 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4174 CI->getArgOperand(1)); 4175 default: 4176 break; 4177 } 4178 } 4179 4180 default: 4181 break; 4182 } 4183 4184 return None; 4185 } 4186 4187 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4188 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4189 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4190 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4191 /// follows one of the following patterns: 4192 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4193 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4194 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4195 /// we return the type of the truncation operation, and indicate whether the 4196 /// truncated type should be treated as signed/unsigned by setting 4197 /// \p Signed to true/false, respectively. 4198 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4199 bool &Signed, ScalarEvolution &SE) { 4200 4201 // The case where Op == SymbolicPHI (that is, with no type conversions on 4202 // the way) is handled by the regular add recurrence creating logic and 4203 // would have already been triggered in createAddRecForPHI. Reaching it here 4204 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4205 // because one of the other operands of the SCEVAddExpr updating this PHI is 4206 // not invariant). 4207 // 4208 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4209 // this case predicates that allow us to prove that Op == SymbolicPHI will 4210 // be added. 4211 if (Op == SymbolicPHI) 4212 return nullptr; 4213 4214 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4215 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4216 if (SourceBits != NewBits) 4217 return nullptr; 4218 4219 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4220 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4221 if (!SExt && !ZExt) 4222 return nullptr; 4223 const SCEVTruncateExpr *Trunc = 4224 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4225 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4226 if (!Trunc) 4227 return nullptr; 4228 const SCEV *X = Trunc->getOperand(); 4229 if (X != SymbolicPHI) 4230 return nullptr; 4231 Signed = SExt ? 
true : false; 4232 return Trunc->getType(); 4233 } 4234 4235 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4236 if (!PN->getType()->isIntegerTy()) 4237 return nullptr; 4238 const Loop *L = LI.getLoopFor(PN->getParent()); 4239 if (!L || L->getHeader() != PN->getParent()) 4240 return nullptr; 4241 return L; 4242 } 4243 4244 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4245 // computation that updates the phi follows the following pattern: 4246 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4247 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4248 // If so, try to see if it can be rewritten as an AddRecExpr under some 4249 // Predicates. If successful, return them as a pair. Also cache the results 4250 // of the analysis. 4251 // 4252 // Example usage scenario: 4253 // Say the Rewriter is called for the following SCEV: 4254 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4255 // where: 4256 // %X = phi i64 (%Start, %BEValue) 4257 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4258 // and call this function with %SymbolicPHI = %X. 4259 // 4260 // The analysis will find that the value coming around the backedge has 4261 // the following SCEV: 4262 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4263 // Upon concluding that this matches the desired pattern, the function 4264 // will return the pair {NewAddRec, SmallPredsVec} where: 4265 // NewAddRec = {%Start,+,%Step} 4266 // SmallPredsVec = {P1, P2, P3} as follows: 4267 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4268 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4269 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4270 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4271 // under the predicates {P1,P2,P3}. 4272 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4273 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4274 // 4275 // TODO's: 4276 // 4277 // 1) Extend the Induction descriptor to also support inductions that involve 4278 // casts: When needed (namely, when we are called in the context of the 4279 // vectorizer induction analysis), a Set of cast instructions will be 4280 // populated by this method, and provided back to isInductionPHI. This is 4281 // needed to allow the vectorizer to properly record them to be ignored by 4282 // the cost model and to avoid vectorizing them (otherwise these casts, 4283 // which are redundant under the runtime overflow checks, will be 4284 // vectorized, which can be costly). 4285 // 4286 // 2) Support additional induction/PHISCEV patterns: We also want to support 4287 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4288 // after the induction update operation (the induction increment): 4289 // 4290 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4291 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4292 // 4293 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4294 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 4295 // 4296 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 
4297 // 4298 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4299 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4300 SmallVector<const SCEVPredicate *, 3> Predicates; 4301 4302 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4303 // return an AddRec expression under some predicate. 4304 4305 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4306 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4307 assert (L && "Expecting an integer loop header phi"); 4308 4309 // The loop may have multiple entrances or multiple exits; we can analyze 4310 // this phi as an addrec if it has a unique entry value and a unique 4311 // backedge value. 4312 Value *BEValueV = nullptr, *StartValueV = nullptr; 4313 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4314 Value *V = PN->getIncomingValue(i); 4315 if (L->contains(PN->getIncomingBlock(i))) { 4316 if (!BEValueV) { 4317 BEValueV = V; 4318 } else if (BEValueV != V) { 4319 BEValueV = nullptr; 4320 break; 4321 } 4322 } else if (!StartValueV) { 4323 StartValueV = V; 4324 } else if (StartValueV != V) { 4325 StartValueV = nullptr; 4326 break; 4327 } 4328 } 4329 if (!BEValueV || !StartValueV) 4330 return None; 4331 4332 const SCEV *BEValue = getSCEV(BEValueV); 4333 4334 // If the value coming around the backedge is an add with the symbolic 4335 // value we just inserted, possibly with casts that we can ignore under 4336 // an appropriate runtime guard, then we found a simple induction variable! 4337 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4338 if (!Add) 4339 return None; 4340 4341 // If there is a single occurrence of the symbolic value, possibly 4342 // casted, replace it with a recurrence. 4343 unsigned FoundIndex = Add->getNumOperands(); 4344 Type *TruncTy = nullptr; 4345 bool Signed; 4346 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4347 if ((TruncTy = 4348 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4349 if (FoundIndex == e) { 4350 FoundIndex = i; 4351 break; 4352 } 4353 4354 if (FoundIndex == Add->getNumOperands()) 4355 return None; 4356 4357 // Create an add with everything but the specified operand. 4358 SmallVector<const SCEV *, 8> Ops; 4359 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4360 if (i != FoundIndex) 4361 Ops.push_back(Add->getOperand(i)); 4362 const SCEV *Accum = getAddExpr(Ops); 4363 4364 // The runtime checks will not be valid if the step amount is 4365 // varying inside the loop. 4366 if (!isLoopInvariant(Accum, L)) 4367 return None; 4368 4369 4370 // *** Part2: Create the predicates 4371 4372 // Analysis was successful: we have a phi-with-cast pattern for which we 4373 // can return an AddRec expression under the following predicates: 4374 // 4375 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4376 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4377 //    P2: An Equal predicate that guarantees that
4378 //         Start = (Ext ix (Trunc iy (Start) to ix) to iy)
4379 //    P3: An Equal predicate that guarantees that
4380 //         Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
4381 //
4382 // As we next prove, the above predicates guarantee that:
4383 //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
4384 //
4385 //
4386 // More formally, we want to prove that:
4387 //     Expr(i+1) = Start + (i+1) * Accum
4388 //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4389 //
4390 // Given that:
4391 // 1) Expr(0) = Start
4392 // 2) Expr(1) = Start + Accum
4393 //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
4394 // 3) Induction hypothesis (step i):
4395 //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
4396 //
4397 // Proof:
4398 //  Expr(i+1) =
4399 //   = Start + (i+1)*Accum
4400 //   = (Start + i*Accum) + Accum
4401 //   = Expr(i) + Accum
4402 //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
4403 //                                                           :: from step i
4404 //
4405 //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
4406 //
4407 //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
4408 //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
4409 //     + Accum                                              :: from P3
4410 //
4411 //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
4412 //     + Accum                          :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
4413 //
4414 //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
4415 //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
4416 //
4417 // By induction, the same applies to all iterations 1<=i<n.
4418 //
4419 
4420   // Create a truncated addrec for which we will add a no overflow check (P1).
4421   const SCEV *StartVal = getSCEV(StartValueV);
4422   const SCEV *PHISCEV =
4423       getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
4424                     getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
4425   const auto *AR = cast<SCEVAddRecExpr>(PHISCEV);
4426 
4427   SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
4428       Signed ? SCEVWrapPredicate::IncrementNSSW
4429              : SCEVWrapPredicate::IncrementNUSW;
4430   const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
4431   Predicates.push_back(AddRecPred);
4432 
4433   // Create the Equal Predicates P2,P3:
4434   auto AppendPredicate = [&](const SCEV *Expr) -> void {
4435     assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
4436     const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
4437     const SCEV *ExtendedExpr =
4438         Signed ? getSignExtendExpr(TruncatedExpr, Expr->getType())
4439                : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4440     if (Expr != ExtendedExpr &&
4441         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4442       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4443       DEBUG(dbgs() << "Added Predicate: " << *Pred);
4444       Predicates.push_back(Pred);
4445     }
4446   };
4447 
4448   AppendPredicate(StartVal);
4449   AppendPredicate(Accum);
4450 
4451   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4452   // which the casts had been folded away. The caller can rewrite SymbolicPHI
4453   // into NewAR if it will also add the runtime overflow checks specified in
4454   // Predicates.
4455   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4456 
4457   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4458       std::make_pair(NewAR, Predicates);
4459   // Remember the result of the analysis for this SCEV at this location.
4460 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 4461 return PredRewrite; 4462 } 4463 4464 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4465 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 4466 4467 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4468 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4469 if (!L) 4470 return None; 4471 4472 // Check to see if we already analyzed this PHI. 4473 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4474 if (I != PredicatedSCEVRewrites.end()) { 4475 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4476 I->second; 4477 // Analysis was done before and failed to create an AddRec: 4478 if (Rewrite.first == SymbolicPHI) 4479 return None; 4480 // Analysis was done before and succeeded to create an AddRec under 4481 // a predicate: 4482 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4483 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4484 return Rewrite; 4485 } 4486 4487 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4488 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4489 4490 // Record in the cache that the analysis failed 4491 if (!Rewrite) { 4492 SmallVector<const SCEVPredicate *, 3> Predicates; 4493 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4494 return None; 4495 } 4496 4497 return Rewrite; 4498 } 4499 4500 /// A helper function for createAddRecFromPHI to handle simple cases. 4501 /// 4502 /// This function tries to find an AddRec expression for the simplest (yet most 4503 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4504 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4505 /// technique for finding the AddRec expression. 4506 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4507 Value *BEValueV, 4508 Value *StartValueV) { 4509 const Loop *L = LI.getLoopFor(PN->getParent()); 4510 assert(L && L->getHeader() == PN->getParent()); 4511 assert(BEValueV && StartValueV); 4512 4513 auto BO = MatchBinaryOp(BEValueV, DT); 4514 if (!BO) 4515 return nullptr; 4516 4517 if (BO->Opcode != Instruction::Add) 4518 return nullptr; 4519 4520 const SCEV *Accum = nullptr; 4521 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4522 Accum = getSCEV(BO->RHS); 4523 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4524 Accum = getSCEV(BO->LHS); 4525 4526 if (!Accum) 4527 return nullptr; 4528 4529 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4530 if (BO->IsNUW) 4531 Flags = setFlags(Flags, SCEV::FlagNUW); 4532 if (BO->IsNSW) 4533 Flags = setFlags(Flags, SCEV::FlagNSW); 4534 4535 const SCEV *StartVal = getSCEV(StartValueV); 4536 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4537 4538 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4539 4540 // We can add Flags to the post-inc expression only if we 4541 // know that it is *undefined behavior* for BEValueV to 4542 // overflow. 
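  // That is, when isAddRecNeverPoison proves that a wrapping BEValueV could
  // only produce a poison value that is guaranteed to reach a use with
  // undefined behavior, the post-increment recurrence built below may carry
  // the same no-wrap flags.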
4543 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4544 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4545 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4546 
4547 return PHISCEV;
4548 }
4549 
4550 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4551 const Loop *L = LI.getLoopFor(PN->getParent());
4552 if (!L || L->getHeader() != PN->getParent())
4553 return nullptr;
4554 
4555 // The loop may have multiple entrances or multiple exits; we can analyze
4556 // this phi as an addrec if it has a unique entry value and a unique
4557 // backedge value.
4558 Value *BEValueV = nullptr, *StartValueV = nullptr;
4559 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4560 Value *V = PN->getIncomingValue(i);
4561 if (L->contains(PN->getIncomingBlock(i))) {
4562 if (!BEValueV) {
4563 BEValueV = V;
4564 } else if (BEValueV != V) {
4565 BEValueV = nullptr;
4566 break;
4567 }
4568 } else if (!StartValueV) {
4569 StartValueV = V;
4570 } else if (StartValueV != V) {
4571 StartValueV = nullptr;
4572 break;
4573 }
4574 }
4575 if (!BEValueV || !StartValueV)
4576 return nullptr;
4577 
4578 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
4579 "PHI node already processed?");
4580 
4581 // First, try to find an AddRec expression without creating a fictitious
4582 // symbolic value for PN.
4583 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
4584 return S;
4585 
4586 // Handle PHI node value symbolically.
4587 const SCEV *SymbolicName = getUnknown(PN);
4588 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
4589 
4590 // Using this symbolic name for the PHI, analyze the value coming around
4591 // the back-edge.
4592 const SCEV *BEValue = getSCEV(BEValueV);
4593 
4594 // NOTE: If BEValue is loop invariant, we know that the PHI node just
4595 // has a special value for the first iteration of the loop.
4596 
4597 // If the value coming around the backedge is an add with the symbolic
4598 // value we just inserted, then we found a simple induction variable!
4599 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
4600 // If there is a single occurrence of the symbolic value, replace it
4601 // with a recurrence.
4602 unsigned FoundIndex = Add->getNumOperands();
4603 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4604 if (Add->getOperand(i) == SymbolicName)
4605 if (FoundIndex == e) {
4606 FoundIndex = i;
4607 break;
4608 }
4609 
4610 if (FoundIndex != Add->getNumOperands()) {
4611 // Create an add with everything but the specified operand.
4612 SmallVector<const SCEV *, 8> Ops;
4613 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4614 if (i != FoundIndex)
4615 Ops.push_back(Add->getOperand(i));
4616 const SCEV *Accum = getAddExpr(Ops);
4617 
4618 // This is not a valid addrec if the step amount is varying each
4619 // loop iteration, but is not itself an addrec in this loop.
4620 if (isLoopInvariant(Accum, L) ||
4621 (isa<SCEVAddRecExpr>(Accum) &&
4622 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
4623 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4624 
4625 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
4626 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
4627 if (BO->IsNUW)
4628 Flags = setFlags(Flags, SCEV::FlagNUW);
4629 if (BO->IsNSW)
4630 Flags = setFlags(Flags, SCEV::FlagNSW);
4631 }
4632 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
4633 // If the increment is an inbounds GEP, then we know the address
4634 // space cannot be wrapped around.
We cannot make any guarantee
4635 // about signed or unsigned overflow because pointers are
4636 // unsigned but we may have a negative index from the base
4637 // pointer. We can guarantee that no unsigned wrap occurs if the
4638 // indices form a positive value.
4639 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
4640 Flags = setFlags(Flags, SCEV::FlagNW);
4641 
4642 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
4643 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
4644 Flags = setFlags(Flags, SCEV::FlagNUW);
4645 }
4646 
4647 // We cannot transfer nuw and nsw flags from subtraction
4648 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
4649 // for instance.
4650 }
4651 
4652 const SCEV *StartVal = getSCEV(StartValueV);
4653 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4654 
4655 // Okay, for the entire analysis of this edge we assumed the PHI
4656 // to be symbolic. We now need to go back and purge all of the
4657 // entries for the scalars that use the symbolic expression.
4658 forgetSymbolicName(PN, SymbolicName);
4659 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4660 
4661 // We can add Flags to the post-inc expression only if we
4662 // know that it is *undefined behavior* for BEValueV to
4663 // overflow.
4664 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4665 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4666 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4667 
4668 return PHISCEV;
4669 }
4670 }
4671 } else {
4672 // Otherwise, this could be a loop like this:
4673 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
4674 // In this case, j = {1,+,1} and BEValue is j.
4675 // Because the other in-value of i (0) fits the evolution of BEValue,
4676 // i really is an addrec evolution.
4677 //
4678 // We can generalize this by saying that i is the shifted value of BEValue
4679 // by one iteration:
4680 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
4681 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
4682 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
4683 if (Shifted != getCouldNotCompute() &&
4684 Start != getCouldNotCompute()) {
4685 const SCEV *StartVal = getSCEV(StartValueV);
4686 if (Start == StartVal) {
4687 // Okay, for the entire analysis of this edge we assumed the PHI
4688 // to be symbolic. We now need to go back and purge all of the
4689 // entries for the scalars that use the symbolic expression.
4690 forgetSymbolicName(PN, SymbolicName);
4691 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
4692 return Shifted;
4693 }
4694 }
4695 }
4696 
4697 // Remove the temporary PHI node SCEV that has been inserted while intending
4698 // to create an AddRecExpr for this PHI node. We cannot keep this temporary
4699 // entry, as it would prevent later (possibly simpler) SCEV expressions from
4700 // being added to the ValueExprMap.
4701 eraseValueFromMap(PN);
4702 
4703 return nullptr;
4704 }
4705 
4706 // Checks if the SCEV S is available at BB. S is considered available at BB
4707 // if S can be materialized at BB without introducing a fault.
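// For example (a hypothetical illustration): a SCEVUnknown wrapping a load
// is available at BB only if the load is in a block that dominates BB;
// materializing it anywhere else could introduce a fault or a different
// value. An add recurrence for a loop containing BB is always available,
// since its value at BB is just the current value of the induction variable.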
4708 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
4709 BasicBlock *BB) {
4710 struct CheckAvailable {
4711 bool TraversalDone = false;
4712 bool Available = true;
4713 
4714 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
4715 BasicBlock *BB = nullptr;
4716 DominatorTree &DT;
4717 
4718 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
4719 : L(L), BB(BB), DT(DT) {}
4720 
4721 bool setUnavailable() {
4722 TraversalDone = true;
4723 Available = false;
4724 return false;
4725 }
4726 
4727 bool follow(const SCEV *S) {
4728 switch (S->getSCEVType()) {
4729 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
4730 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
4731 // These expressions are available if their operand(s) is/are.
4732 return true;
4733 
4734 case scAddRecExpr: {
4735 // We allow add recurrences that are in the loop BB is in, or in some
4736 // outer loop. This guarantees availability because the value of the
4737 // add recurrence at BB is simply the "current" value of the induction
4738 // variable. We can relax this in the future; for instance an add
4739 // recurrence on a sibling dominating loop is also available at BB.
4740 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
4741 if (L && (ARLoop == L || ARLoop->contains(L)))
4742 return true;
4743 
4744 return setUnavailable();
4745 }
4746 
4747 case scUnknown: {
4748 // For SCEVUnknown, we check for simple dominance.
4749 const auto *SU = cast<SCEVUnknown>(S);
4750 Value *V = SU->getValue();
4751 
4752 if (isa<Argument>(V))
4753 return false;
4754 
4755 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
4756 return false;
4757 
4758 return setUnavailable();
4759 }
4760 
4761 case scUDivExpr:
4762 case scCouldNotCompute:
4763 // We do not try to be smart about these at all.
4764 return setUnavailable();
4765 }
4766 llvm_unreachable("switch should be fully covered!");
4767 }
4768 
4769 bool isDone() { return TraversalDone; }
4770 };
4771 
4772 CheckAvailable CA(L, BB, DT);
4773 SCEVTraversal<CheckAvailable> ST(CA);
4774 
4775 ST.visitAll(S);
4776 return CA.Available;
4777 }
4778 
4779 // Try to match a control flow sequence that branches out at BI and merges back
4780 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
4781 // match.
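// (Note that the two dominance checks in the body handle both PHI operand
// orders: whichever incoming value arrives via the branch's first successor
// binds to LHS, so the result always reads as "C ? value-from-left :
// value-from-right" regardless of the order of the PHI's incoming entries.)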
4782 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4783 Value *&C, Value *&LHS, Value *&RHS) { 4784 C = BI->getCondition(); 4785 4786 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4787 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4788 4789 if (!LeftEdge.isSingleEdge()) 4790 return false; 4791 4792 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4793 4794 Use &LeftUse = Merge->getOperandUse(0); 4795 Use &RightUse = Merge->getOperandUse(1); 4796 4797 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4798 LHS = LeftUse; 4799 RHS = RightUse; 4800 return true; 4801 } 4802 4803 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4804 LHS = RightUse; 4805 RHS = LeftUse; 4806 return true; 4807 } 4808 4809 return false; 4810 } 4811 4812 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4813 auto IsReachable = 4814 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 4815 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 4816 const Loop *L = LI.getLoopFor(PN->getParent()); 4817 4818 // We don't want to break LCSSA, even in a SCEV expression tree. 4819 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4820 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4821 return nullptr; 4822 4823 // Try to match 4824 // 4825 // br %cond, label %left, label %right 4826 // left: 4827 // br label %merge 4828 // right: 4829 // br label %merge 4830 // merge: 4831 // V = phi [ %x, %left ], [ %y, %right ] 4832 // 4833 // as "select %cond, %x, %y" 4834 4835 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4836 assert(IDom && "At least the entry block should dominate PN"); 4837 4838 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4839 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4840 4841 if (BI && BI->isConditional() && 4842 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4843 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4844 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4845 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4846 } 4847 4848 return nullptr; 4849 } 4850 4851 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4852 if (const SCEV *S = createAddRecFromPHI(PN)) 4853 return S; 4854 4855 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4856 return S; 4857 4858 // If the PHI has a single incoming value, follow that value, unless the 4859 // PHI's incoming blocks are in a different loop, in which case doing so 4860 // risks breaking LCSSA form. Instcombine would normally zap these, but 4861 // it doesn't have DominatorTree information, so it may miss cases. 4862 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 4863 if (LI.replacementPreservesLCSSAForm(PN, V)) 4864 return getSCEV(V); 4865 4866 // If it's not a loop phi, we can't handle it yet. 4867 return getUnknown(PN); 4868 } 4869 4870 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 4871 Value *Cond, 4872 Value *TrueVal, 4873 Value *FalseVal) { 4874 // Handle "constant" branch or select. This can occur for instance when a 4875 // loop pass transforms an inner loop and moves on to process the outer loop. 4876 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 4877 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 4878 4879 // Try to match some simple smax or umax patterns. 
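// For example (hypothetical IR):
//   %cmp = icmp sgt i32 %a, %b
//   %sel = select i1 %cmp, i32 %a, i32 %b
// becomes smax(%a, %b). The cases below also allow both arms to carry a
// common offset x, as in "a >s b ? a+x : b+x -> smax(a, b)+x".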
4880 auto *ICI = dyn_cast<ICmpInst>(Cond); 4881 if (!ICI) 4882 return getUnknown(I); 4883 4884 Value *LHS = ICI->getOperand(0); 4885 Value *RHS = ICI->getOperand(1); 4886 4887 switch (ICI->getPredicate()) { 4888 case ICmpInst::ICMP_SLT: 4889 case ICmpInst::ICMP_SLE: 4890 std::swap(LHS, RHS); 4891 LLVM_FALLTHROUGH; 4892 case ICmpInst::ICMP_SGT: 4893 case ICmpInst::ICMP_SGE: 4894 // a >s b ? a+x : b+x -> smax(a, b)+x 4895 // a >s b ? b+x : a+x -> smin(a, b)+x 4896 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4897 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 4898 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 4899 const SCEV *LA = getSCEV(TrueVal); 4900 const SCEV *RA = getSCEV(FalseVal); 4901 const SCEV *LDiff = getMinusSCEV(LA, LS); 4902 const SCEV *RDiff = getMinusSCEV(RA, RS); 4903 if (LDiff == RDiff) 4904 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 4905 LDiff = getMinusSCEV(LA, RS); 4906 RDiff = getMinusSCEV(RA, LS); 4907 if (LDiff == RDiff) 4908 return getAddExpr(getSMinExpr(LS, RS), LDiff); 4909 } 4910 break; 4911 case ICmpInst::ICMP_ULT: 4912 case ICmpInst::ICMP_ULE: 4913 std::swap(LHS, RHS); 4914 LLVM_FALLTHROUGH; 4915 case ICmpInst::ICMP_UGT: 4916 case ICmpInst::ICMP_UGE: 4917 // a >u b ? a+x : b+x -> umax(a, b)+x 4918 // a >u b ? b+x : a+x -> umin(a, b)+x 4919 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4920 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4921 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 4922 const SCEV *LA = getSCEV(TrueVal); 4923 const SCEV *RA = getSCEV(FalseVal); 4924 const SCEV *LDiff = getMinusSCEV(LA, LS); 4925 const SCEV *RDiff = getMinusSCEV(RA, RS); 4926 if (LDiff == RDiff) 4927 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 4928 LDiff = getMinusSCEV(LA, RS); 4929 RDiff = getMinusSCEV(RA, LS); 4930 if (LDiff == RDiff) 4931 return getAddExpr(getUMinExpr(LS, RS), LDiff); 4932 } 4933 break; 4934 case ICmpInst::ICMP_NE: 4935 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 4936 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4937 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4938 const SCEV *One = getOne(I->getType()); 4939 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4940 const SCEV *LA = getSCEV(TrueVal); 4941 const SCEV *RA = getSCEV(FalseVal); 4942 const SCEV *LDiff = getMinusSCEV(LA, LS); 4943 const SCEV *RDiff = getMinusSCEV(RA, One); 4944 if (LDiff == RDiff) 4945 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4946 } 4947 break; 4948 case ICmpInst::ICMP_EQ: 4949 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 4950 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4951 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4952 const SCEV *One = getOne(I->getType()); 4953 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4954 const SCEV *LA = getSCEV(TrueVal); 4955 const SCEV *RA = getSCEV(FalseVal); 4956 const SCEV *LDiff = getMinusSCEV(LA, One); 4957 const SCEV *RDiff = getMinusSCEV(RA, LS); 4958 if (LDiff == RDiff) 4959 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4960 } 4961 break; 4962 default: 4963 break; 4964 } 4965 4966 return getUnknown(I); 4967 } 4968 4969 /// Expand GEP instructions into add and multiply operations. This allows them 4970 /// to be analyzed by regular SCEV code. 4971 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 4972 // Don't attempt to analyze GEPs over unsized objects. 
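// (For example, a GEP whose source element type is an opaque struct:
// without a size for the elements there is no way to express the GEP's
// offset arithmetic as adds and multiplies.)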
4973 if (!GEP->getSourceElementType()->isSized())
4974 return getUnknown(GEP);
4975 
4976 SmallVector<const SCEV *, 4> IndexExprs;
4977 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
4978 IndexExprs.push_back(getSCEV(*Index));
4979 return getGEPExpr(GEP, IndexExprs);
4980 }
4981 
4982 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
4983 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
4984 return C->getAPInt().countTrailingZeros();
4985 
4986 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
4987 return std::min(GetMinTrailingZeros(T->getOperand()),
4988 (uint32_t)getTypeSizeInBits(T->getType()));
4989 
4990 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
4991 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
4992 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
4993 ? getTypeSizeInBits(E->getType())
4994 : OpRes;
4995 }
4996 
4997 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
4998 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
4999 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5000 ? getTypeSizeInBits(E->getType())
5001 : OpRes;
5002 }
5003 
5004 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5005 // The result is the min of all operands' results.
5006 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5007 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5008 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5009 return MinOpRes;
5010 }
5011 
5012 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5013 // The result is the sum of all operands' results.
5014 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5015 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5016 for (unsigned i = 1, e = M->getNumOperands();
5017 SumOpRes != BitWidth && i != e; ++i)
5018 SumOpRes =
5019 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5020 return SumOpRes;
5021 }
5022 
5023 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5024 // The result is the min of all operands' results.
5025 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5026 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5027 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5028 return MinOpRes;
5029 }
5030 
5031 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5032 // The result is the min of all operands' results.
5033 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5034 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5035 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5036 return MinOpRes;
5037 }
5038 
5039 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5040 // The result is the min of all operands' results.
5041 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5042 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5043 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5044 return MinOpRes;
5045 }
5046 
5047 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5048 // For a SCEVUnknown, ask ValueTracking.
5049 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5050 return Known.countMinTrailingZeros(); 5051 } 5052 5053 // SCEVUDivExpr 5054 return 0; 5055 } 5056 5057 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5058 auto I = MinTrailingZerosCache.find(S); 5059 if (I != MinTrailingZerosCache.end()) 5060 return I->second; 5061 5062 uint32_t Result = GetMinTrailingZerosImpl(S); 5063 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5064 assert(InsertPair.second && "Should insert a new key"); 5065 return InsertPair.first->second; 5066 } 5067 5068 /// Helper method to assign a range to V from metadata present in the IR. 5069 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5070 if (Instruction *I = dyn_cast<Instruction>(V)) 5071 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5072 return getConstantRangeFromMetadata(*MD); 5073 5074 return None; 5075 } 5076 5077 /// Determine the range for a particular SCEV. If SignHint is 5078 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5079 /// with a "cleaner" unsigned (resp. signed) representation. 5080 const ConstantRange & 5081 ScalarEvolution::getRangeRef(const SCEV *S, 5082 ScalarEvolution::RangeSignHint SignHint) { 5083 DenseMap<const SCEV *, ConstantRange> &Cache = 5084 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5085 : SignedRanges; 5086 5087 // See if we've computed this range already. 5088 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5089 if (I != Cache.end()) 5090 return I->second; 5091 5092 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5093 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5094 5095 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5096 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5097 5098 // If the value has known zeros, the maximum value will have those known zeros 5099 // as well. 
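// For example (i8, unsigned hint): with 3 known trailing zero bits the
// conservative range becomes [0, 0xF8 + 1), i.e. the maximum value is
// rounded down to a multiple of 8.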
5100 uint32_t TZ = GetMinTrailingZeros(S); 5101 if (TZ != 0) { 5102 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5103 ConservativeResult = 5104 ConstantRange(APInt::getMinValue(BitWidth), 5105 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5106 else 5107 ConservativeResult = ConstantRange( 5108 APInt::getSignedMinValue(BitWidth), 5109 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5110 } 5111 5112 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5113 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5114 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5115 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5116 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5117 } 5118 5119 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5120 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5121 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5122 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5123 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5124 } 5125 5126 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5127 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5128 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5129 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5130 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5131 } 5132 5133 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5134 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5135 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5136 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5137 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5138 } 5139 5140 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5141 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5142 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5143 return setRange(UDiv, SignHint, 5144 ConservativeResult.intersectWith(X.udiv(Y))); 5145 } 5146 5147 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5148 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5149 return setRange(ZExt, SignHint, 5150 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5151 } 5152 5153 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5154 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5155 return setRange(SExt, SignHint, 5156 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5157 } 5158 5159 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5160 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5161 return setRange(Trunc, SignHint, 5162 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5163 } 5164 5165 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5166 // If there's no unsigned wrap, the value will never be less than its 5167 // initial value. 5168 if (AddRec->hasNoUnsignedWrap()) 5169 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5170 if (!C->getValue()->isZero()) 5171 ConservativeResult = ConservativeResult.intersectWith( 5172 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5173 5174 // If there's no signed wrap, and all the operands have the same sign or 5175 // zero, the value won't ever change sign. 
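// For example, {1,+,2}<nsw> has only non-negative operands, so its range
// can be intersected with [0, SignedMin), i.e. the non-negative values of
// the bitwidth.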
5176 if (AddRec->hasNoSignedWrap()) { 5177 bool AllNonNeg = true; 5178 bool AllNonPos = true; 5179 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5180 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5181 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5182 } 5183 if (AllNonNeg) 5184 ConservativeResult = ConservativeResult.intersectWith( 5185 ConstantRange(APInt(BitWidth, 0), 5186 APInt::getSignedMinValue(BitWidth))); 5187 else if (AllNonPos) 5188 ConservativeResult = ConservativeResult.intersectWith( 5189 ConstantRange(APInt::getSignedMinValue(BitWidth), 5190 APInt(BitWidth, 1))); 5191 } 5192 5193 // TODO: non-affine addrec 5194 if (AddRec->isAffine()) { 5195 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5196 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5197 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5198 auto RangeFromAffine = getRangeForAffineAR( 5199 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5200 BitWidth); 5201 if (!RangeFromAffine.isFullSet()) 5202 ConservativeResult = 5203 ConservativeResult.intersectWith(RangeFromAffine); 5204 5205 auto RangeFromFactoring = getRangeViaFactoring( 5206 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5207 BitWidth); 5208 if (!RangeFromFactoring.isFullSet()) 5209 ConservativeResult = 5210 ConservativeResult.intersectWith(RangeFromFactoring); 5211 } 5212 } 5213 5214 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5215 } 5216 5217 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5218 // Check if the IR explicitly contains !range metadata. 5219 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5220 if (MDRange.hasValue()) 5221 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5222 5223 // Split here to avoid paying the compile-time cost of calling both 5224 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5225 // if needed. 5226 const DataLayout &DL = getDataLayout(); 5227 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5228 // For a SCEVUnknown, ask ValueTracking. 5229 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5230 if (Known.One != ~Known.Zero + 1) 5231 ConservativeResult = 5232 ConservativeResult.intersectWith(ConstantRange(Known.One, 5233 ~Known.Zero + 1)); 5234 } else { 5235 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5236 "generalize as needed!"); 5237 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5238 if (NS > 1) 5239 ConservativeResult = ConservativeResult.intersectWith( 5240 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5241 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5242 } 5243 5244 return setRange(U, SignHint, std::move(ConservativeResult)); 5245 } 5246 5247 return setRange(S, SignHint, std::move(ConservativeResult)); 5248 } 5249 5250 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5251 // values that the expression can take. Initially, the expression has a value 5252 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5253 // argument defines if we treat Step as signed or unsigned. 
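// For example (a worked instance of the helper below): with
// StartRange = [0, 2), Step = 3, and MaxBECount = 4, the expression can
// move up by at most 3 * 4 = 12 from some start in {0, 1}, so the result
// is [0, 1 + 12 + 1) = [0, 14).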
5254 static ConstantRange getRangeForAffineARHelper(APInt Step,
5255 const ConstantRange &StartRange,
5256 const APInt &MaxBECount,
5257 unsigned BitWidth, bool Signed) {
5258 // If either Step or MaxBECount is 0, then the expression won't change, and we
5259 // just need to return the initial range.
5260 if (Step == 0 || MaxBECount == 0)
5261 return StartRange;
5262 
5263 // If we don't know anything about the initial value (i.e. StartRange is
5264 // FullRange), then we don't know anything about the final range either.
5265 // Return FullRange.
5266 if (StartRange.isFullSet())
5267 return ConstantRange(BitWidth, /* isFullSet = */ true);
5268 
5269 // If Step is signed and negative, then we use its absolute value, but we also
5270 // note that we're moving in the opposite direction.
5271 bool Descending = Signed && Step.isNegative();
5272 
5273 if (Signed)
5274 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5275 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5276 // These equations hold true due to the well-defined wrap-around behavior
5277 // of APInt.
5278 Step = Step.abs();
5279 
5280 // Check if Offset is more than the full span of BitWidth. If it is, the
5281 // expression is guaranteed to overflow.
5282 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5283 return ConstantRange(BitWidth, /* isFullSet = */ true);
5284 
5285 // Offset is by how much the expression can change. Checks above guarantee no
5286 // overflow here.
5287 APInt Offset = Step * MaxBECount;
5288 
5289 // Minimum value of the final range will match the minimal value of StartRange
5290 // if the expression is increasing and will be decreased by Offset otherwise.
5291 // Maximum value of the final range will match the maximal value of StartRange
5292 // if the expression is decreasing and will be increased by Offset otherwise.
5293 APInt StartLower = StartRange.getLower();
5294 APInt StartUpper = StartRange.getUpper() - 1;
5295 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5296 : (StartUpper + std::move(Offset));
5297 
5298 // It's possible that the new minimum/maximum value will fall into the initial
5299 // range (due to wrap around). This means that the expression can take any
5300 // value in this bitwidth, and we have to return full range.
5301 if (StartRange.contains(MovedBoundary))
5302 return ConstantRange(BitWidth, /* isFullSet = */ true);
5303 
5304 APInt NewLower =
5305 Descending ? std::move(MovedBoundary) : std::move(StartLower);
5306 APInt NewUpper =
5307 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5308 NewUpper += 1;
5309 
5310 // If we end up with full range, return a proper full range.
5311 if (NewLower == NewUpper)
5312 return ConstantRange(BitWidth, /* isFullSet = */ true);
5313 
5314 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
5315 return ConstantRange(std::move(NewLower), std::move(NewUpper));
5316 }
5317 
5318 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
5319 const SCEV *Step,
5320 const SCEV *MaxBECount,
5321 unsigned BitWidth) {
5322 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
5323 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
5324 "Precondition!");
5325 
5326 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
5327 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5328 
5329 // First, consider step signed.
5330 ConstantRange StartSRange = getSignedRange(Start); 5331 ConstantRange StepSRange = getSignedRange(Step); 5332 5333 // If Step can be both positive and negative, we need to find ranges for the 5334 // maximum absolute step values in both directions and union them. 5335 ConstantRange SR = 5336 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5337 MaxBECountValue, BitWidth, /* Signed = */ true); 5338 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5339 StartSRange, MaxBECountValue, 5340 BitWidth, /* Signed = */ true)); 5341 5342 // Next, consider step unsigned. 5343 ConstantRange UR = getRangeForAffineARHelper( 5344 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5345 MaxBECountValue, BitWidth, /* Signed = */ false); 5346 5347 // Finally, intersect signed and unsigned ranges. 5348 return SR.intersectWith(UR); 5349 } 5350 5351 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5352 const SCEV *Step, 5353 const SCEV *MaxBECount, 5354 unsigned BitWidth) { 5355 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5356 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5357 5358 struct SelectPattern { 5359 Value *Condition = nullptr; 5360 APInt TrueValue; 5361 APInt FalseValue; 5362 5363 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5364 const SCEV *S) { 5365 Optional<unsigned> CastOp; 5366 APInt Offset(BitWidth, 0); 5367 5368 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5369 "Should be!"); 5370 5371 // Peel off a constant offset: 5372 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5373 // In the future we could consider being smarter here and handle 5374 // {Start+Step,+,Step} too. 5375 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5376 return; 5377 5378 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5379 S = SA->getOperand(1); 5380 } 5381 5382 // Peel off a cast operation 5383 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5384 CastOp = SCast->getSCEVType(); 5385 S = SCast->getOperand(); 5386 } 5387 5388 using namespace llvm::PatternMatch; 5389 5390 auto *SU = dyn_cast<SCEVUnknown>(S); 5391 const APInt *TrueVal, *FalseVal; 5392 if (!SU || 5393 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5394 m_APInt(FalseVal)))) { 5395 Condition = nullptr; 5396 return; 5397 } 5398 5399 TrueValue = *TrueVal; 5400 FalseValue = *FalseVal; 5401 5402 // Re-apply the cast we peeled off earlier 5403 if (CastOp.hasValue()) 5404 switch (*CastOp) { 5405 default: 5406 llvm_unreachable("Unknown SCEV cast type!"); 5407 5408 case scTruncate: 5409 TrueValue = TrueValue.trunc(BitWidth); 5410 FalseValue = FalseValue.trunc(BitWidth); 5411 break; 5412 case scZeroExtend: 5413 TrueValue = TrueValue.zext(BitWidth); 5414 FalseValue = FalseValue.zext(BitWidth); 5415 break; 5416 case scSignExtend: 5417 TrueValue = TrueValue.sext(BitWidth); 5418 FalseValue = FalseValue.sext(BitWidth); 5419 break; 5420 } 5421 5422 // Re-apply the constant offset we peeled off earlier 5423 TrueValue += Offset; 5424 FalseValue += Offset; 5425 } 5426 5427 bool isRecognized() { return Condition != nullptr; } 5428 }; 5429 5430 SelectPattern StartPattern(*this, BitWidth, Start); 5431 if (!StartPattern.isRecognized()) 5432 return ConstantRange(BitWidth, /* isFullSet = */ true); 5433 5434 SelectPattern StepPattern(*this, BitWidth, Step); 5435 if (!StepPattern.isRecognized()) 5436 return ConstantRange(BitWidth, /* isFullSet = */ true); 5437 5438 if (StartPattern.Condition != StepPattern.Condition) { 5439 // 
We don't handle this case today, but we could, by considering four
5440 // possibilities below instead of two. I'm not sure if there are cases where
5441 // that will help over what getRange already does, though.
5442 return ConstantRange(BitWidth, /* isFullSet = */ true);
5443 }
5444 
5445 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
5446 // construct arbitrary general SCEV expressions here. This function is called
5447 // from deep in the call stack, and calling getSCEV (on a sext instruction,
5448 // say) can end up caching a suboptimal value.
5449 
5450 // FIXME: without the explicit `this` receiver below, MSVC errors out with
5451 // C2352 and C2512 (otherwise it isn't needed).
5452 
5453 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
5454 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
5455 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
5456 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
5457 
5458 ConstantRange TrueRange =
5459 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
5460 ConstantRange FalseRange =
5461 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);
5462 
5463 return TrueRange.unionWith(FalseRange);
5464 }
5465 
5466 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
5467 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
5468 const BinaryOperator *BinOp = cast<BinaryOperator>(V);
5469 
5470 // Return early if there are no flags to propagate to the SCEV.
5471 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5472 if (BinOp->hasNoUnsignedWrap())
5473 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
5474 if (BinOp->hasNoSignedWrap())
5475 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
5476 if (Flags == SCEV::FlagAnyWrap)
5477 return SCEV::FlagAnyWrap;
5478 
5479 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
5480 }
5481 
5482 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
5483 // Here we check that I is in the header of the innermost loop containing I,
5484 // since we only deal with instructions in the loop header. The actual loop we
5485 // need to check later will come from an add recurrence, but getting that
5486 // requires computing the SCEV of the operands, which can be expensive. This
5487 // check can be done cheaply to rule out some cases early.
5488 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
5489 if (InnermostContainingLoop == nullptr ||
5490 InnermostContainingLoop->getHeader() != I->getParent())
5491 return false;
5492 
5493 // Only proceed if we can prove that I does not yield poison, i.e. the program would be undefined were I to produce full poison.
5494 if (!programUndefinedIfFullPoison(I))
5495 return false;
5496 
5497 // At this point we know that if I is executed, then it does not wrap
5498 // according to at least one of NSW or NUW. If I is not executed, then we do
5499 // not know if the calculation that I represents would wrap. Multiple
5500 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
5501 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
5502 // derived from other instructions that map to the same SCEV. We cannot make
5503 // that guarantee for cases where I is not executed. So we need to find the
5504 // loop that I is considered in relation to and prove that I is executed for
5505 // every iteration of that loop.
That implies that the value that I
5506 // calculates does not wrap anywhere in the loop, so then we can apply the
5507 // flags to the SCEV.
5508 //
5509 // We check isLoopInvariant to disambiguate in case we are adding recurrences
5510 // from different loops, so that we know which loop to prove that I is
5511 // executed in.
5512 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
5513 // I could be an extractvalue from a call to an overflow intrinsic.
5514 // TODO: We can do better here in some cases.
5515 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5516 return false;
5517 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5518 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5519 bool AllOtherOpsLoopInvariant = true;
5520 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5521 ++OtherOpIndex) {
5522 if (OtherOpIndex != OpIndex) {
5523 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5524 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5525 AllOtherOpsLoopInvariant = false;
5526 break;
5527 }
5528 }
5529 }
5530 if (AllOtherOpsLoopInvariant &&
5531 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5532 return true;
5533 }
5534 }
5535 return false;
5536 }
5537 
5538 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5539 // If we know that \c I can never be poison period, then that's enough.
5540 if (isSCEVExprNeverPoison(I))
5541 return true;
5542 
5543 // For an add recurrence specifically, we assume that infinite loops without
5544 // side effects are undefined behavior, and then reason as follows:
5545 //
5546 // If the add recurrence is poison in any iteration, it is poison on all
5547 // future iterations (since incrementing poison yields poison). If the result
5548 // of the add recurrence is fed into the loop latch condition and the loop
5549 // does not contain any throws or exiting blocks other than the latch, we now
5550 // have the ability to "choose" whether the backedge is taken or not (by
5551 // choosing a sufficiently evil value for the poison feeding into the branch)
5552 // for every iteration including and after the one in which \p I first became
5553 // poison. There are two possibilities (let's call the iteration in which \p
5554 // I first became poison K):
5555 //
5556 // 1. In the set of iterations including and after K, the loop body executes
5557 // no side effects. In this case executing the backedge an infinite number
5558 // of times will yield undefined behavior.
5559 //
5560 // 2. In the set of iterations including and after K, the loop body executes
5561 // at least one side effect. In this case, that specific instance of side
5562 // effect is control dependent on poison, which also yields undefined
5563 // behavior.
5564 
5565 auto *ExitingBB = L->getExitingBlock();
5566 auto *LatchBB = L->getLoopLatch();
5567 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5568 return false;
5569 
5570 SmallPtrSet<const Instruction *, 16> Pushed;
5571 SmallVector<const Instruction *, 8> PoisonStack;
5572 
5573 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5574 // things that are known to be fully poison under that assumption go on the
5575 // PoisonStack.
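// For example, if the post-inc value feeds "%m = mul i32 %inc, 3" and %m in
// turn feeds the latch's conditional branch, then %m propagates the assumed
// poison and the latch becomes control dependent on it.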
5576 Pushed.insert(I); 5577 PoisonStack.push_back(I); 5578 5579 bool LatchControlDependentOnPoison = false; 5580 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5581 const Instruction *Poison = PoisonStack.pop_back_val(); 5582 5583 for (auto *PoisonUser : Poison->users()) { 5584 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5585 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5586 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5587 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5588 assert(BI->isConditional() && "Only possibility!"); 5589 if (BI->getParent() == LatchBB) { 5590 LatchControlDependentOnPoison = true; 5591 break; 5592 } 5593 } 5594 } 5595 } 5596 5597 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5598 } 5599 5600 ScalarEvolution::LoopProperties 5601 ScalarEvolution::getLoopProperties(const Loop *L) { 5602 typedef ScalarEvolution::LoopProperties LoopProperties; 5603 5604 auto Itr = LoopPropertiesCache.find(L); 5605 if (Itr == LoopPropertiesCache.end()) { 5606 auto HasSideEffects = [](Instruction *I) { 5607 if (auto *SI = dyn_cast<StoreInst>(I)) 5608 return !SI->isSimple(); 5609 5610 return I->mayHaveSideEffects(); 5611 }; 5612 5613 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5614 /*HasNoSideEffects*/ true}; 5615 5616 for (auto *BB : L->getBlocks()) 5617 for (auto &I : *BB) { 5618 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5619 LP.HasNoAbnormalExits = false; 5620 if (HasSideEffects(&I)) 5621 LP.HasNoSideEffects = false; 5622 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5623 break; // We're already as pessimistic as we can get. 5624 } 5625 5626 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5627 assert(InsertPair.second && "We just checked!"); 5628 Itr = InsertPair.first; 5629 } 5630 5631 return Itr->second; 5632 } 5633 5634 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5635 if (!isSCEVable(V->getType())) 5636 return getUnknown(V); 5637 5638 if (Instruction *I = dyn_cast<Instruction>(V)) { 5639 // Don't attempt to analyze instructions in blocks that aren't 5640 // reachable. Such instructions don't matter, and they aren't required 5641 // to obey basic rules for definitions dominating uses which this 5642 // analysis depends on. 5643 if (!DT.isReachableFromEntry(I->getParent())) 5644 return getUnknown(V); 5645 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5646 return getConstant(CI); 5647 else if (isa<ConstantPointerNull>(V)) 5648 return getZero(V->getType()); 5649 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5650 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5651 else if (!isa<ConstantExpr>(V)) 5652 return getUnknown(V); 5653 5654 Operator *U = cast<Operator>(V); 5655 if (auto BO = MatchBinaryOp(U, DT)) { 5656 switch (BO->Opcode) { 5657 case Instruction::Add: { 5658 // The simple thing to do would be to just call getSCEV on both operands 5659 // and call getAddExpr with the result. However if we're looking at a 5660 // bunch of things all added together, this can be quite inefficient, 5661 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5662 // Instead, gather up all the operands and make a single getAddExpr call. 5663 // LLVM IR canonical form means we need only traverse the left operands. 
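// For example, a reassociated sum is canonically left-leaning, as in
// (((a + b) + c) + d): the loop below pushes d, c, and b in turn while
// descending into the left operand, and finally pushes a.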
5664 SmallVector<const SCEV *, 4> AddOps; 5665 do { 5666 if (BO->Op) { 5667 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5668 AddOps.push_back(OpSCEV); 5669 break; 5670 } 5671 5672 // If a NUW or NSW flag can be applied to the SCEV for this 5673 // addition, then compute the SCEV for this addition by itself 5674 // with a separate call to getAddExpr. We need to do that 5675 // instead of pushing the operands of the addition onto AddOps, 5676 // since the flags are only known to apply to this particular 5677 // addition - they may not apply to other additions that can be 5678 // formed with operands from AddOps. 5679 const SCEV *RHS = getSCEV(BO->RHS); 5680 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5681 if (Flags != SCEV::FlagAnyWrap) { 5682 const SCEV *LHS = getSCEV(BO->LHS); 5683 if (BO->Opcode == Instruction::Sub) 5684 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5685 else 5686 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5687 break; 5688 } 5689 } 5690 5691 if (BO->Opcode == Instruction::Sub) 5692 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5693 else 5694 AddOps.push_back(getSCEV(BO->RHS)); 5695 5696 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5697 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5698 NewBO->Opcode != Instruction::Sub)) { 5699 AddOps.push_back(getSCEV(BO->LHS)); 5700 break; 5701 } 5702 BO = NewBO; 5703 } while (true); 5704 5705 return getAddExpr(AddOps); 5706 } 5707 5708 case Instruction::Mul: { 5709 SmallVector<const SCEV *, 4> MulOps; 5710 do { 5711 if (BO->Op) { 5712 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5713 MulOps.push_back(OpSCEV); 5714 break; 5715 } 5716 5717 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5718 if (Flags != SCEV::FlagAnyWrap) { 5719 MulOps.push_back( 5720 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5721 break; 5722 } 5723 } 5724 5725 MulOps.push_back(getSCEV(BO->RHS)); 5726 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5727 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5728 MulOps.push_back(getSCEV(BO->LHS)); 5729 break; 5730 } 5731 BO = NewBO; 5732 } while (true); 5733 5734 return getMulExpr(MulOps); 5735 } 5736 case Instruction::UDiv: 5737 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5738 case Instruction::Sub: { 5739 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5740 if (BO->Op) 5741 Flags = getNoWrapFlagsFromUB(BO->Op); 5742 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5743 } 5744 case Instruction::And: 5745 // For an expression like x&255 that merely masks off the high bits, 5746 // use zext(trunc(x)) as the SCEV expression. 5747 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5748 if (CI->isZero()) 5749 return getSCEV(BO->RHS); 5750 if (CI->isMinusOne()) 5751 return getSCEV(BO->LHS); 5752 const APInt &A = CI->getValue(); 5753 5754 // Instcombine's ShrinkDemandedConstant may strip bits out of 5755 // constants, obscuring what would otherwise be a low-bits mask. 5756 // Use computeKnownBits to compute what ShrinkDemandedConstant 5757 // knew about to reconstruct a low-bits mask value. 
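// For example (hypothetical, i8): "x & 0x7f" may have been shrunk to
// "x & 0x7c" because the low two bits of x are known zero; the known bits
// let us treat the constant as the low-bits mask 0x7f it originally was.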
5758 unsigned LZ = A.countLeadingZeros(); 5759 unsigned TZ = A.countTrailingZeros(); 5760 unsigned BitWidth = A.getBitWidth(); 5761 KnownBits Known(BitWidth); 5762 computeKnownBits(BO->LHS, Known, getDataLayout(), 5763 0, &AC, nullptr, &DT); 5764 5765 APInt EffectiveMask = 5766 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5767 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5768 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5769 const SCEV *LHS = getSCEV(BO->LHS); 5770 const SCEV *ShiftedLHS = nullptr; 5771 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5772 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5773 // For an expression like (x * 8) & 8, simplify the multiply. 5774 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5775 unsigned GCD = std::min(MulZeros, TZ); 5776 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5777 SmallVector<const SCEV*, 4> MulOps; 5778 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5779 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5780 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5781 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5782 } 5783 } 5784 if (!ShiftedLHS) 5785 ShiftedLHS = getUDivExpr(LHS, MulCount); 5786 return getMulExpr( 5787 getZeroExtendExpr( 5788 getTruncateExpr(ShiftedLHS, 5789 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5790 BO->LHS->getType()), 5791 MulCount); 5792 } 5793 } 5794 break; 5795 5796 case Instruction::Or: 5797 // If the RHS of the Or is a constant, we may have something like: 5798 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5799 // optimizations will transparently handle this case. 5800 // 5801 // In order for this transformation to be safe, the LHS must be of the 5802 // form X*(2^n) and the Or constant must be less than 2^n. 5803 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5804 const SCEV *LHS = getSCEV(BO->LHS); 5805 const APInt &CIVal = CI->getValue(); 5806 if (GetMinTrailingZeros(LHS) >= 5807 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5808 // Build a plain add SCEV. 5809 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5810 // If the LHS of the add was an addrec and it has no-wrap flags, 5811 // transfer the no-wrap flags, since an or won't introduce a wrap. 5812 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5813 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5814 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5815 OldAR->getNoWrapFlags()); 5816 } 5817 return S; 5818 } 5819 } 5820 break; 5821 5822 case Instruction::Xor: 5823 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5824 // If the RHS of xor is -1, then this is a not operation. 5825 if (CI->isMinusOne()) 5826 return getNotSCEV(getSCEV(BO->LHS)); 5827 5828 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5829 // This is a variant of the check for xor with -1, and it handles 5830 // the case where instcombine has trimmed non-demanded bits out 5831 // of an xor with -1. 
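// For example (i8): "xor (and x, 15), 15" flips exactly the low four
// bits, which is "and (not x), 15"; when the "and" appears as a zext of
// a narrower value, this becomes zext(not(x)) below.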
5832 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5833 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5834 if (LBO->getOpcode() == Instruction::And && 5835 LCI->getValue() == CI->getValue()) 5836 if (const SCEVZeroExtendExpr *Z = 5837 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5838 Type *UTy = BO->LHS->getType(); 5839 const SCEV *Z0 = Z->getOperand(); 5840 Type *Z0Ty = Z0->getType(); 5841 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5842 5843 // If C is a low-bits mask, the zero extend is serving to 5844 // mask off the high bits. Complement the operand and 5845 // re-apply the zext. 5846 if (CI->getValue().isMask(Z0TySize)) 5847 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5848 5849 // If C is a single bit, it may be in the sign-bit position 5850 // before the zero-extend. In this case, represent the xor 5851 // using an add, which is equivalent, and re-apply the zext. 5852 APInt Trunc = CI->getValue().trunc(Z0TySize); 5853 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5854 Trunc.isSignMask()) 5855 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5856 UTy); 5857 } 5858 } 5859 break; 5860 5861 case Instruction::Shl: 5862 // Turn shift left of a constant amount into a multiply. 5863 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5864 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5865 5866 // If the shift count is not less than the bitwidth, the result of 5867 // the shift is undefined. Don't try to analyze it, because the 5868 // resolution chosen here may differ from the resolution chosen in 5869 // other parts of the compiler. 5870 if (SA->getValue().uge(BitWidth)) 5871 break; 5872 5873 // It is currently not resolved how to interpret NSW for left 5874 // shift by BitWidth - 1, so we avoid applying flags in that 5875 // case. Remove this check (or this comment) once the situation 5876 // is resolved. See 5877 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5878 // and http://reviews.llvm.org/D8890 . 5879 auto Flags = SCEV::FlagAnyWrap; 5880 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5881 Flags = getNoWrapFlagsFromUB(BO->Op); 5882 5883 Constant *X = ConstantInt::get(getContext(), 5884 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5885 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5886 } 5887 break; 5888 5889 case Instruction::AShr: 5890 // AShr X, C, where C is a constant. 5891 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 5892 if (!CI) 5893 break; 5894 5895 Type *OuterTy = BO->LHS->getType(); 5896 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 5897 // If the shift count is not less than the bitwidth, the result of 5898 // the shift is undefined. Don't try to analyze it, because the 5899 // resolution chosen here may differ from the resolution chosen in 5900 // other parts of the compiler. 5901 if (CI->getValue().uge(BitWidth)) 5902 break; 5903 5904 if (CI->isZero()) 5905 return getSCEV(BO->LHS); // shift by zero --> noop 5906 5907 uint64_t AShrAmt = CI->getZExtValue(); 5908 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 5909 5910 Operator *L = dyn_cast<Operator>(BO->LHS); 5911 if (L && L->getOpcode() == Instruction::Shl) { 5912 // X = Shl A, n 5913 // Y = AShr X, m 5914 // Both n and m are constant. 5915 5916 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 5917 if (L->getOperand(1) == BO->RHS) 5918 // For a two-shift sext-inreg, i.e. n = m, 5919 // use sext(trunc(x)) as the SCEV expression. 
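// e.g. on i32, (x << 24) >>s 24 is sext_inreg(x, i8), i.e.
// sext(trunc(x to i8) to i32).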
5920 return getSignExtendExpr(
5921 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
5922 
5923 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
5924 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
5925 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
5926 if (ShlAmt > AShrAmt) {
5927 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
5928 // expression. We already checked that ShlAmt < BitWidth, so
5929 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
5930 // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
5931 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
5932 ShlAmt - AShrAmt);
5933 return getSignExtendExpr(
5934 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
5935 getConstant(Mul)), OuterTy);
5936 }
5937 }
5938 }
5939 break;
5940 }
5941 }
5942 
5943 switch (U->getOpcode()) {
5944 case Instruction::Trunc:
5945 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
5946 
5947 case Instruction::ZExt:
5948 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5949 
5950 case Instruction::SExt:
5951 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5952 
5953 case Instruction::BitCast:
5954 // BitCasts are no-op casts so we just eliminate the cast.
5955 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
5956 return getSCEV(U->getOperand(0));
5957 break;
5958 
5959 // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
5960 // lead to pointer expressions which cannot safely be expanded to GEPs,
5961 // because ScalarEvolution doesn't respect the GEP aliasing rules when
5962 // simplifying integer expressions.
5963 
5964 case Instruction::GetElementPtr:
5965 return createNodeForGEP(cast<GEPOperator>(U));
5966 
5967 case Instruction::PHI:
5968 return createNodeForPHI(cast<PHINode>(U));
5969 
5970 case Instruction::Select:
5971 // U can also be a select constant expr, which we let fall through. Since
5972 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
5973 // constant expressions cannot have instructions as operands, we'd have
5974 // returned getUnknown for a select constant expression anyway.
5975 if (isa<Instruction>(U))
5976 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
5977 U->getOperand(1), U->getOperand(2));
5978 break;
5979 
5980 case Instruction::Call:
5981 case Instruction::Invoke:
5982 if (Value *RV = CallSite(U).getReturnedArgOperand())
5983 return getSCEV(RV);
5984 break;
5985 }
5986 
5987 return getUnknown(V);
5988 }
5989 
5990 
5991 
5992 //===----------------------------------------------------------------------===//
5993 // Iteration Count Computation Code
5994 //
5995 
5996 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
5997 if (!ExitCount)
5998 return 0;
5999 
6000 ConstantInt *ExitConst = ExitCount->getValue();
6001 
6002 // Guard against huge trip counts.
6003 if (ExitConst->getValue().getActiveBits() > 32)
6004 return 0;
6005 
6006 // In case of integer overflow, this returns 0, which is correct.
6007 return ((unsigned)ExitConst->getZExtValue()) + 1;
6008 }
6009 
6010 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6011 if (BasicBlock *ExitingBB = L->getExitingBlock())
6012 return getSmallConstantTripCount(L, ExitingBB);
6013 
6014 // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be the
/// multiple of a constant (which is also the case if the trip count is
/// simply a constant; use getSmallConstantTripCount for that case). It will
/// also return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Get the expression for the number of loop iterations for which this loop
/// is guaranteed not to exit via ExitingBlock. Otherwise return
/// SCEVCouldNotCompute.
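///
/// For example (illustrative): for a loop of the form `while (i != n) ++i`
/// with i starting at 0, the exit count via the single exiting block is n,
/// and the trip count helpers above derive n + 1 from it by adding one.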
6089 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6090 BasicBlock *ExitingBlock) { 6091 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6092 } 6093 6094 const SCEV * 6095 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6096 SCEVUnionPredicate &Preds) { 6097 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 6098 } 6099 6100 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6101 return getBackedgeTakenInfo(L).getExact(this); 6102 } 6103 6104 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6105 /// known never to be less than the actual backedge taken count. 6106 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6107 return getBackedgeTakenInfo(L).getMax(this); 6108 } 6109 6110 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6111 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6112 } 6113 6114 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6115 static void 6116 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6117 BasicBlock *Header = L->getHeader(); 6118 6119 // Push all Loop-header PHIs onto the Worklist stack. 6120 for (BasicBlock::iterator I = Header->begin(); 6121 PHINode *PN = dyn_cast<PHINode>(I); ++I) 6122 Worklist.push_back(PN); 6123 } 6124 6125 const ScalarEvolution::BackedgeTakenInfo & 6126 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6127 auto &BTI = getBackedgeTakenInfo(L); 6128 if (BTI.hasFullInfo()) 6129 return BTI; 6130 6131 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6132 6133 if (!Pair.second) 6134 return Pair.first->second; 6135 6136 BackedgeTakenInfo Result = 6137 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6138 6139 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6140 } 6141 6142 const ScalarEvolution::BackedgeTakenInfo & 6143 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6144 // Initially insert an invalid entry for this loop. If the insertion 6145 // succeeds, proceed to actually compute a backedge-taken count and 6146 // update the value. The temporary CouldNotCompute value tells SCEV 6147 // code elsewhere that it shouldn't attempt to request a new 6148 // backedge-taken count, which could result in infinite recursion. 6149 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6150 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6151 if (!Pair.second) 6152 return Pair.first->second; 6153 6154 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6155 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6156 // must be cleared in this scope. 6157 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6158 6159 if (Result.getExact(this) != getCouldNotCompute()) { 6160 assert(isLoopInvariant(Result.getExact(this), L) && 6161 isLoopInvariant(Result.getMax(this), L) && 6162 "Computed backedge-taken count isn't loop invariant for loop!"); 6163 ++NumTripCountsComputed; 6164 } 6165 else if (Result.getMax(this) == getCouldNotCompute() && 6166 isa<PHINode>(L->getHeader()->begin())) { 6167 // Only count loops that have phi nodes as not being computable. 
    ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
  RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);

  // Drop information about predicated SCEV rewrites for this loop.
  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.second == L)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  PushLoopPHIs(L, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }

  // Forget all contained loops too, to avoid dangling entries in the
  // ValuesAtScopes map.
  for (Loop *I : *L)
    forgetLoop(I);

  LoopPropertiesCache.erase(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with a single exit.
/// Returning the minimum taken count among all exits is incorrect because
/// one of the loop's exit limits may have been skipped. howFarToZero assumes
/// that the limit of each loop test is never skipped. This is a valid
/// assumption as long as the loop exits via that test. For precise results,
/// it is the caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const SCEV *BECount = nullptr;
  for (auto &ENT : ExitNotTaken) {
    assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");

    if (!BECount)
      BECount = ENT.ExactNotTaken;
    else if (BECount != ENT.ExactNotTaken)
      return SE->getCouldNotCompute();
    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  assert(BECount && "Invalid not taken count for loop exit");
  return BECount;
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
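/// This can still be useful when the exact count is unknown; e.g.
/// (illustrative) for an i8 counter compared against an unknown i8 bound,
/// the exact backedge-taken count may be symbolic while the max is the
/// constant 255.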
6344 const SCEV * 6345 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6346 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6347 return !ENT.hasAlwaysTruePredicate(); 6348 }; 6349 6350 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6351 return SE->getCouldNotCompute(); 6352 6353 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6354 "No point in having a non-constant max backedge taken count!"); 6355 return getMax(); 6356 } 6357 6358 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6359 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6360 return !ENT.hasAlwaysTruePredicate(); 6361 }; 6362 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6363 } 6364 6365 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6366 ScalarEvolution *SE) const { 6367 if (getMax() && getMax() != SE->getCouldNotCompute() && 6368 SE->hasOperand(getMax(), S)) 6369 return true; 6370 6371 for (auto &ENT : ExitNotTaken) 6372 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6373 SE->hasOperand(ENT.ExactNotTaken, S)) 6374 return true; 6375 6376 return false; 6377 } 6378 6379 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6380 : ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) { 6381 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6382 isa<SCEVConstant>(MaxNotTaken)) && 6383 "No point in having a non-constant max backedge taken count!"); 6384 } 6385 6386 ScalarEvolution::ExitLimit::ExitLimit( 6387 const SCEV *E, const SCEV *M, bool MaxOrZero, 6388 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6389 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6390 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6391 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6392 "Exact is not allowed to be less precise than Max"); 6393 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6394 isa<SCEVConstant>(MaxNotTaken)) && 6395 "No point in having a non-constant max backedge taken count!"); 6396 for (auto *PredSet : PredSetList) 6397 for (auto *P : *PredSet) 6398 addPredicate(P); 6399 } 6400 6401 ScalarEvolution::ExitLimit::ExitLimit( 6402 const SCEV *E, const SCEV *M, bool MaxOrZero, 6403 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6404 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6405 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6406 isa<SCEVConstant>(MaxNotTaken)) && 6407 "No point in having a non-constant max backedge taken count!"); 6408 } 6409 6410 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6411 bool MaxOrZero) 6412 : ExitLimit(E, M, MaxOrZero, None) { 6413 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6414 isa<SCEVConstant>(MaxNotTaken)) && 6415 "No point in having a non-constant max backedge taken count!"); 6416 } 6417 6418 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6419 /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
        &&ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo;
  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];
    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit, otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
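    //
    // For example (illustrative): given two computable exits where only the
    // first dominates the latch, MaxNotTaken counts of 10 (must-exit) and
    // 100 (may-exit) yield MaxBECount = 10; if neither exit dominated the
    // latch, the conservative answer would be their maximum, 100.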
6495 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6496 DT.dominates(ExitBB, Latch)) { 6497 if (!MustExitMaxBECount) { 6498 MustExitMaxBECount = EL.MaxNotTaken; 6499 MustExitMaxOrZero = EL.MaxOrZero; 6500 } else { 6501 MustExitMaxBECount = 6502 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6503 } 6504 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6505 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6506 MayExitMaxBECount = EL.MaxNotTaken; 6507 else { 6508 MayExitMaxBECount = 6509 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6510 } 6511 } 6512 } 6513 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6514 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6515 // The loop backedge will be taken the maximum or zero times if there's 6516 // a single exit that must be taken the maximum or zero times. 6517 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6518 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6519 MaxBECount, MaxOrZero); 6520 } 6521 6522 ScalarEvolution::ExitLimit 6523 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6524 bool AllowPredicates) { 6525 6526 // Okay, we've chosen an exiting block. See what condition causes us to exit 6527 // at this block and remember the exit block and whether all other targets 6528 // lead to the loop header. 6529 bool MustExecuteLoopHeader = true; 6530 BasicBlock *Exit = nullptr; 6531 for (auto *SBB : successors(ExitingBlock)) 6532 if (!L->contains(SBB)) { 6533 if (Exit) // Multiple exit successors. 6534 return getCouldNotCompute(); 6535 Exit = SBB; 6536 } else if (SBB != L->getHeader()) { 6537 MustExecuteLoopHeader = false; 6538 } 6539 6540 // At this point, we know we have a conditional branch that determines whether 6541 // the loop is exited. However, we don't know if the branch is executed each 6542 // time through the loop. If not, then the execution count of the branch will 6543 // not be equal to the trip count of the loop. 6544 // 6545 // Currently we check for this by checking to see if the Exit branch goes to 6546 // the loop header. If so, we know it will always execute the same number of 6547 // times as the loop. We also handle the case where the exit block *is* the 6548 // loop header. This is common for un-rotated loops. 6549 // 6550 // If both of those tests fail, walk up the unique predecessor chain to the 6551 // header, stopping if there is an edge that doesn't exit the loop. If the 6552 // header is reached, the execution count of the branch will be equal to the 6553 // trip count of the loop. 6554 // 6555 // More extensive analysis could be done to handle more cases here. 6556 // 6557 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6558 // The simple checks failed, try climbing the unique predecessor chain 6559 // up to the header. 6560 bool Ok = false; 6561 for (BasicBlock *BB = ExitingBlock; BB; ) { 6562 BasicBlock *Pred = BB->getUniquePredecessor(); 6563 if (!Pred) 6564 return getCouldNotCompute(); 6565 TerminatorInst *PredTerm = Pred->getTerminator(); 6566 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6567 if (PredSucc == BB) 6568 continue; 6569 // If the predecessor has a successor that isn't BB and isn't 6570 // outside the loop, assume the worst. 
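        // (A successor edge back into the loop body would mean the exit
        // test can be bypassed on some iterations, in which case its
        // execution count need not equal the loop's trip count.)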
6571 if (L->contains(PredSucc)) 6572 return getCouldNotCompute(); 6573 } 6574 if (Pred == L->getHeader()) { 6575 Ok = true; 6576 break; 6577 } 6578 BB = Pred; 6579 } 6580 if (!Ok) 6581 return getCouldNotCompute(); 6582 } 6583 6584 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6585 TerminatorInst *Term = ExitingBlock->getTerminator(); 6586 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6587 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6588 // Proceed to the next level to examine the exit condition expression. 6589 return computeExitLimitFromCond( 6590 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6591 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6592 } 6593 6594 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6595 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6596 /*ControlsExit=*/IsOnlyExit); 6597 6598 return getCouldNotCompute(); 6599 } 6600 6601 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6602 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6603 bool ControlsExit, bool AllowPredicates) { 6604 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6605 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6606 ControlsExit, AllowPredicates); 6607 } 6608 6609 Optional<ScalarEvolution::ExitLimit> 6610 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6611 BasicBlock *TBB, BasicBlock *FBB, 6612 bool ControlsExit, bool AllowPredicates) { 6613 (void)this->L; 6614 (void)this->TBB; 6615 (void)this->FBB; 6616 (void)this->AllowPredicates; 6617 6618 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6619 this->AllowPredicates == AllowPredicates && 6620 "Variance in assumed invariant key components!"); 6621 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6622 if (Itr == TripCountMap.end()) 6623 return None; 6624 return Itr->second; 6625 } 6626 6627 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6628 BasicBlock *TBB, BasicBlock *FBB, 6629 bool ControlsExit, 6630 bool AllowPredicates, 6631 const ExitLimit &EL) { 6632 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6633 this->AllowPredicates == AllowPredicates && 6634 "Variance in assumed invariant key components!"); 6635 6636 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6637 assert(InsertResult.second && "Expected successful insertion!"); 6638 (void)InsertResult; 6639 } 6640 6641 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6642 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6643 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6644 6645 if (auto MaybeEL = 6646 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6647 return *MaybeEL; 6648 6649 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6650 ControlsExit, AllowPredicates); 6651 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6652 return EL; 6653 } 6654 6655 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6656 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6657 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6658 // Check if the controlling expression for this loop is an And or Or. 6659 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6660 if (BO->getOpcode() == Instruction::And) { 6661 // Recurse on the operands of the and. 
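      // For example (illustrative):
      //   %c = and i1 %c1, %c2
      //   br i1 %c, label %body, label %exit
      // The loop keeps running only while both conditions hold, so when
      // either operand may exit, the backedge-taken count is the umin of
      // the counts computed for %c1 and %c2 below.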
6662 bool EitherMayExit = L->contains(TBB); 6663 ExitLimit EL0 = computeExitLimitFromCondCached( 6664 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6665 AllowPredicates); 6666 ExitLimit EL1 = computeExitLimitFromCondCached( 6667 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6668 AllowPredicates); 6669 const SCEV *BECount = getCouldNotCompute(); 6670 const SCEV *MaxBECount = getCouldNotCompute(); 6671 if (EitherMayExit) { 6672 // Both conditions must be true for the loop to continue executing. 6673 // Choose the less conservative count. 6674 if (EL0.ExactNotTaken == getCouldNotCompute() || 6675 EL1.ExactNotTaken == getCouldNotCompute()) 6676 BECount = getCouldNotCompute(); 6677 else 6678 BECount = 6679 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6680 if (EL0.MaxNotTaken == getCouldNotCompute()) 6681 MaxBECount = EL1.MaxNotTaken; 6682 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6683 MaxBECount = EL0.MaxNotTaken; 6684 else 6685 MaxBECount = 6686 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6687 } else { 6688 // Both conditions must be true at the same time for the loop to exit. 6689 // For now, be conservative. 6690 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6691 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6692 MaxBECount = EL0.MaxNotTaken; 6693 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6694 BECount = EL0.ExactNotTaken; 6695 } 6696 6697 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6698 // to be more aggressive when computing BECount than when computing 6699 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6700 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6701 // to not. 6702 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6703 !isa<SCEVCouldNotCompute>(BECount)) 6704 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6705 6706 return ExitLimit(BECount, MaxBECount, false, 6707 {&EL0.Predicates, &EL1.Predicates}); 6708 } 6709 if (BO->getOpcode() == Instruction::Or) { 6710 // Recurse on the operands of the or. 6711 bool EitherMayExit = L->contains(FBB); 6712 ExitLimit EL0 = computeExitLimitFromCondCached( 6713 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6714 AllowPredicates); 6715 ExitLimit EL1 = computeExitLimitFromCondCached( 6716 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6717 AllowPredicates); 6718 const SCEV *BECount = getCouldNotCompute(); 6719 const SCEV *MaxBECount = getCouldNotCompute(); 6720 if (EitherMayExit) { 6721 // Both conditions must be false for the loop to continue executing. 6722 // Choose the less conservative count. 6723 if (EL0.ExactNotTaken == getCouldNotCompute() || 6724 EL1.ExactNotTaken == getCouldNotCompute()) 6725 BECount = getCouldNotCompute(); 6726 else 6727 BECount = 6728 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6729 if (EL0.MaxNotTaken == getCouldNotCompute()) 6730 MaxBECount = EL1.MaxNotTaken; 6731 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6732 MaxBECount = EL0.MaxNotTaken; 6733 else 6734 MaxBECount = 6735 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6736 } else { 6737 // Both conditions must be false at the same time for the loop to exit. 6738 // For now, be conservative. 
6739 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 6740 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6741 MaxBECount = EL0.MaxNotTaken; 6742 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6743 BECount = EL0.ExactNotTaken; 6744 } 6745 6746 return ExitLimit(BECount, MaxBECount, false, 6747 {&EL0.Predicates, &EL1.Predicates}); 6748 } 6749 } 6750 6751 // With an icmp, it may be feasible to compute an exact backedge-taken count. 6752 // Proceed to the next level to examine the icmp. 6753 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 6754 ExitLimit EL = 6755 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 6756 if (EL.hasFullInfo() || !AllowPredicates) 6757 return EL; 6758 6759 // Try again, but use SCEV predicates this time. 6760 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 6761 /*AllowPredicates=*/true); 6762 } 6763 6764 // Check for a constant condition. These are normally stripped out by 6765 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 6766 // preserve the CFG and is temporarily leaving constant conditions 6767 // in place. 6768 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 6769 if (L->contains(FBB) == !CI->getZExtValue()) 6770 // The backedge is always taken. 6771 return getCouldNotCompute(); 6772 else 6773 // The backedge is never taken. 6774 return getZero(CI->getType()); 6775 } 6776 6777 // If it's not an integer or pointer comparison then compute it the hard way. 6778 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6779 } 6780 6781 ScalarEvolution::ExitLimit 6782 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 6783 ICmpInst *ExitCond, 6784 BasicBlock *TBB, 6785 BasicBlock *FBB, 6786 bool ControlsExit, 6787 bool AllowPredicates) { 6788 6789 // If the condition was exit on true, convert the condition to exit on false 6790 ICmpInst::Predicate Cond; 6791 if (!L->contains(FBB)) 6792 Cond = ExitCond->getPredicate(); 6793 else 6794 Cond = ExitCond->getInversePredicate(); 6795 6796 // Handle common loops like: for (X = "string"; *X; ++X) 6797 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 6798 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 6799 ExitLimit ItCnt = 6800 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 6801 if (ItCnt.hasAnyInfo()) 6802 return ItCnt; 6803 } 6804 6805 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 6806 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 6807 6808 // Try to evaluate any dependencies out of the loop. 6809 LHS = getSCEVAtScope(LHS, L); 6810 RHS = getSCEVAtScope(RHS, L); 6811 6812 // At this point, we would like to compute how many iterations of the 6813 // loop the predicate will return true for these inputs. 6814 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 6815 // If there is a loop-invariant, force it into the RHS. 6816 std::swap(LHS, RHS); 6817 Cond = ICmpInst::getSwappedPredicate(Cond); 6818 } 6819 6820 // Simplify the operands before analyzing them. 6821 (void)SimplifyICmpOperands(Cond, LHS, RHS); 6822 6823 // If we have a comparison of a chrec against a constant, try to use value 6824 // ranges to answer this query. 6825 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 6826 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 6827 if (AddRec->getLoop() == L) { 6828 // Form the constant range. 
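        // E.g. (illustrative): for Cond == ICMP_ULT and a constant RHS of
        // 8, the exact region is the unsigned range [0, 8), precisely the
        // LHS values for which the compare is true.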
6829 ConstantRange CompRange = 6830 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 6831 6832 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 6833 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 6834 } 6835 6836 switch (Cond) { 6837 case ICmpInst::ICMP_NE: { // while (X != Y) 6838 // Convert to: while (X-Y != 0) 6839 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 6840 AllowPredicates); 6841 if (EL.hasAnyInfo()) return EL; 6842 break; 6843 } 6844 case ICmpInst::ICMP_EQ: { // while (X == Y) 6845 // Convert to: while (X-Y == 0) 6846 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 6847 if (EL.hasAnyInfo()) return EL; 6848 break; 6849 } 6850 case ICmpInst::ICMP_SLT: 6851 case ICmpInst::ICMP_ULT: { // while (X < Y) 6852 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 6853 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 6854 AllowPredicates); 6855 if (EL.hasAnyInfo()) return EL; 6856 break; 6857 } 6858 case ICmpInst::ICMP_SGT: 6859 case ICmpInst::ICMP_UGT: { // while (X > Y) 6860 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 6861 ExitLimit EL = 6862 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 6863 AllowPredicates); 6864 if (EL.hasAnyInfo()) return EL; 6865 break; 6866 } 6867 default: 6868 break; 6869 } 6870 6871 auto *ExhaustiveCount = 6872 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6873 6874 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 6875 return ExhaustiveCount; 6876 6877 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 6878 ExitCond->getOperand(1), L, Cond); 6879 } 6880 6881 ScalarEvolution::ExitLimit 6882 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 6883 SwitchInst *Switch, 6884 BasicBlock *ExitingBlock, 6885 bool ControlsExit) { 6886 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 6887 6888 // Give up if the exit is the default dest of a switch. 6889 if (Switch->getDefaultDest() == ExitingBlock) 6890 return getCouldNotCompute(); 6891 6892 assert(L->contains(Switch->getDefaultDest()) && 6893 "Default case must not exit the loop!"); 6894 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 6895 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 6896 6897 // while (X != Y) --> while (X-Y != 0) 6898 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 6899 if (EL.hasAnyInfo()) 6900 return EL; 6901 6902 return getCouldNotCompute(); 6903 } 6904 6905 static ConstantInt * 6906 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 6907 ScalarEvolution &SE) { 6908 const SCEV *InVal = SE.getConstant(C); 6909 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 6910 assert(isa<SCEVConstant>(Val) && 6911 "Evaluation of SCEV at constant didn't fold correctly?"); 6912 return cast<SCEVConstant>(Val)->getValue(); 6913 } 6914 6915 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 6916 /// compute the backedge execution count. 6917 ScalarEvolution::ExitLimit 6918 ScalarEvolution::computeLoadConstantCompareExitLimit( 6919 LoadInst *LI, 6920 Constant *RHS, 6921 const Loop *L, 6922 ICmpInst::Predicate predicate) { 6923 6924 if (LI->isVolatile()) return getCouldNotCompute(); 6925 6926 // Check to see if the loaded pointer is a getelementptr of a global. 6927 // TODO: Use SCEV instead of manually grubbing with GEPs. 
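  // Illustratively, the shape being matched is:
  //   @table = internal constant [16 x i8] ...
  //   %p = getelementptr [16 x i8], [16 x i8]* @table, i32 0, i32 %iv
  //   %v = load i8, i8* %p
  // i.e. a load of a GEP of a constant global whose sole non-constant index
  // is loop-variant.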
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
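  //
  // E.g. (illustrative) "%s = lshr i32 %x, 3" matches with OutLHS = %x and
  // OutOpCode = LShr; a shift by zero or by a non-constant amount does not
  // match.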
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either %iv or %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and
    // to -1 if K is negative, in at most bitwidth(K) iterations.
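    //
    // E.g. (illustrative, i8): starting from 100 and repeatedly shifting by
    // 1 gives 100, 50, 25, 12, 6, 3, 1, 0 and then stays at 0, while
    // starting from -100 gives -100, -50, -25, -13, -7, -4, -2, -1 and then
    // stays at -1.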
7101 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7102 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7103 Predecessor->getTerminator(), &DT); 7104 auto *Ty = cast<IntegerType>(RHS->getType()); 7105 if (Known.isNonNegative()) 7106 StableValue = ConstantInt::get(Ty, 0); 7107 else if (Known.isNegative()) 7108 StableValue = ConstantInt::get(Ty, -1, true); 7109 else 7110 return getCouldNotCompute(); 7111 7112 break; 7113 } 7114 case Instruction::LShr: 7115 case Instruction::Shl: 7116 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7117 // stabilize to 0 in at most bitwidth(K) iterations. 7118 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7119 break; 7120 } 7121 7122 auto *Result = 7123 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7124 assert(Result->getType()->isIntegerTy(1) && 7125 "Otherwise cannot be an operand to a branch instruction"); 7126 7127 if (Result->isZeroValue()) { 7128 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7129 const SCEV *UpperBound = 7130 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7131 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7132 } 7133 7134 return getCouldNotCompute(); 7135 } 7136 7137 /// Return true if we can constant fold an instruction of the specified type, 7138 /// assuming that all operands were constants. 7139 static bool CanConstantFold(const Instruction *I) { 7140 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7141 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7142 isa<LoadInst>(I)) 7143 return true; 7144 7145 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7146 if (const Function *F = CI->getCalledFunction()) 7147 return canConstantFoldCallTo(CI, F); 7148 return false; 7149 } 7150 7151 /// Determine whether this instruction can constant evolve within this loop 7152 /// assuming its operands can all constant evolve. 7153 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7154 // An instruction outside of the loop can't be derived from a loop PHI. 7155 if (!L->contains(I)) return false; 7156 7157 if (isa<PHINode>(I)) { 7158 // We don't currently keep track of the control flow needed to evaluate 7159 // PHIs, so we cannot handle PHIs inside of loops. 7160 return L->getHeader() == I->getParent(); 7161 } 7162 7163 // If we won't be able to constant fold this expression even if the operands 7164 // are constants, bail early. 7165 return CanConstantFold(I); 7166 } 7167 7168 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7169 /// recursing through each instruction operand until reaching a loop header phi. 7170 static PHINode * 7171 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7172 DenseMap<Instruction *, PHINode *> &PHIMap, 7173 unsigned Depth) { 7174 if (Depth > MaxConstantEvolvingDepth) 7175 return nullptr; 7176 7177 // Otherwise, we can evaluate this instruction if all of its operands are 7178 // constant or derived from a PHI node themselves. 7179 PHINode *PHI = nullptr; 7180 for (Value *Op : UseInst->operands()) { 7181 if (isa<Constant>(Op)) continue; 7182 7183 Instruction *OpInst = dyn_cast<Instruction>(Op); 7184 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7185 7186 PHINode *P = dyn_cast<PHINode>(OpInst); 7187 if (!P) 7188 // If this operand is already visited, reuse the prior result. 7189 // We may have P != PHI if this is the deepest point at which the 7190 // inconsistent paths meet. 
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI. If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that
  // we weren't given a mapping for, or a value such as a call inside the
  // loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
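//
// E.g. (illustrative) for
//   %p = phi i32 [ 7, %a ], [ 7, %b ], [ %x, %latch ]
// getOtherIncomingValue(%p, %latch) returns i32 7; it returns null instead
// if the constant entries disagree.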
7277 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7278 Constant *IncomingVal = nullptr; 7279 7280 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7281 if (PN->getIncomingBlock(i) == BB) 7282 continue; 7283 7284 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7285 if (!CurrentVal) 7286 return nullptr; 7287 7288 if (IncomingVal != CurrentVal) { 7289 if (IncomingVal) 7290 return nullptr; 7291 IncomingVal = CurrentVal; 7292 } 7293 } 7294 7295 return IncomingVal; 7296 } 7297 7298 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7299 /// in the header of its containing loop, we know the loop executes a 7300 /// constant number of times, and the PHI node is just a recurrence 7301 /// involving constants, fold it. 7302 Constant * 7303 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7304 const APInt &BEs, 7305 const Loop *L) { 7306 auto I = ConstantEvolutionLoopExitValue.find(PN); 7307 if (I != ConstantEvolutionLoopExitValue.end()) 7308 return I->second; 7309 7310 if (BEs.ugt(MaxBruteForceIterations)) 7311 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7312 7313 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7314 7315 DenseMap<Instruction *, Constant *> CurrentIterVals; 7316 BasicBlock *Header = L->getHeader(); 7317 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7318 7319 BasicBlock *Latch = L->getLoopLatch(); 7320 if (!Latch) 7321 return nullptr; 7322 7323 for (auto &I : *Header) { 7324 PHINode *PHI = dyn_cast<PHINode>(&I); 7325 if (!PHI) break; 7326 auto *StartCST = getOtherIncomingValue(PHI, Latch); 7327 if (!StartCST) continue; 7328 CurrentIterVals[PHI] = StartCST; 7329 } 7330 if (!CurrentIterVals.count(PN)) 7331 return RetVal = nullptr; 7332 7333 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7334 7335 // Execute the loop symbolically to determine the exit value. 7336 if (BEs.getActiveBits() >= 32) 7337 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! 7338 7339 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7340 unsigned IterationNum = 0; 7341 const DataLayout &DL = getDataLayout(); 7342 for (; ; ++IterationNum) { 7343 if (IterationNum == NumIterations) 7344 return RetVal = CurrentIterVals[PN]; // Got exit value! 7345 7346 // Compute the value of the PHIs for the next iteration. 7347 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7348 DenseMap<Instruction *, Constant *> NextIterVals; 7349 Constant *NextPHI = 7350 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7351 if (!NextPHI) 7352 return nullptr; // Couldn't evaluate! 7353 NextIterVals[PN] = NextPHI; 7354 7355 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7356 7357 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7358 // cease to be able to evaluate one of them or if they stop evolving, 7359 // because that doesn't necessarily prevent us from computing PN. 7360 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7361 for (const auto &I : CurrentIterVals) { 7362 PHINode *PHI = dyn_cast<PHINode>(I.first); 7363 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7364 PHIsToCompute.emplace_back(PHI, I.second); 7365 } 7366 // We use two distinct loops because EvaluateExpression may invalidate any 7367 // iterators into CurrentIterVals. 
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface. That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes. We can add bytes to
            // an i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
      break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = this->LI[I->getParent()];
      if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed-form solution for the PHI node. Check
            // to see if the loop that contains it has a known backedge-taken
            // count. If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                    dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes. If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate
      // the result. This is particularly useful for computing loop exit
      // values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If an operand is non-constant and also non-integer and
          // non-pointer, don't even try to analyze it with SCEV techniques.
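          // (Illustrative aside: isSCEVable is true only for integer and
          // pointer types, so e.g. a floating-point operand makes us give
          // up here.)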
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1],
                                                DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative
        // expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the
      // AddRec loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2); // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //
  //     I * (B / D) mod (N / D)
  //
  // To simplify the computation, we factor out the divide by D:
  //
  //     (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns the two roots (which might be the same), or
/// None if the roots could not be computed.
///
static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.

  // The A coefficient is N/2.
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2.
  APInt B = M;
  B -= A; // A is the same as N/2.

  // The C coefficient is L.
  const APInt &C = L;

  // Compute the B^2-4AC term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4AC). This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0. We know and take advantage of the fact that
  // this expression is only used in a comparison-with-zero context.
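  //
  // Illustrative example (assumed, not taken from a test): for a loop
  // "for (i = 0; i != n; i += 2)" the exit expression is
  // V = i - n = {-n,+,2}, and the backedge-taken count is the smallest N
  // with -n + 2*N == 0 (mod 2^BW), which is what the machinery below solves.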

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant:
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2); // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index. When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          // We found a quadratic root!
          return ExitLimit(R1, R1, false, Predicates);
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //     Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the AddRec is NUW,
  // then (in an unsigned sense) it cannot be counting up to wrap to 0; it
  // must be counting down to equal 0. Consequently, N = Start / -Step. We
  // have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls the loop exit (the loop exits only if the
  // expression is true) and the addition is no-wrap, we can use unsigned
  // divide to compute the backedge count. In this case, the step may not
  // divide the distance, but we don't care because if the condition is
  // "missed" the loop will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a
/// condition guarding a loop, it can be useful to be a little more general,
/// since a front-end may have replicated the controlling expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type
    // are identical and do not read memory, but they compute distinct
    // values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
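  // For instance (illustrative): "X u< 1" has the exact range [0, 1), which
  // the makeExactICmpRegion logic below converts to the equality "X == 0".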
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages refer to the fact
      // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
      // should have fired on the corresponding cases, and canonicalized the
      // check to trivially_true or trivially_false.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
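  // For example (illustrative): "LHS s<= RHS" becomes "LHS s< RHS + 1"
  // whenever the signed-range check below proves that RHS + 1 cannot
  // overflow.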
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;

trivially_true:
  // Return 0 == 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_EQ;
  return true;

trivially_false:
  // Return 0 != 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_NE;
  return true;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If LHS or RHS is an addrec, check to see if the condition is true in
  // every iteration of the loop.
  // If LHS and RHS are both addrec, both conditions must be true in
  // every iteration of the loop.
  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  bool LeftGuarded = false;
  bool RightGuarded = false;
  if (LAR) {
    const Loop *L = LAR->getLoop();
    if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
        isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
      if (!RAR) return true;
      LeftGuarded = true;
    }
  }
  if (RAR) {
    const Loop *L = RAR->getLoop();
    if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
        isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
      if (!LAR) return true;
      RightGuarded = true;
    }
  }
  if (LeftGuarded && RightGuarded)
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with known constant ranges.
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change into a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other
  // way around for decreasing predicates); all we care about is that *if*
  // the predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.
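  //
  // (Illustrative: a loop-invariant addrec {X,+,0}<nuw> is still accepted
  // below -- the predicate's truth value never changes at all, which
  // vacuously satisfies the "only flips from false to true" requirement.)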

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant operand, force it into the RHS; otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false
  // to true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both of the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
        return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
            .contains(RangeLHS);
      };

  // The check at the top of the function catches the case where the values
  // are known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {

  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of
  // the interesting cases seen in practice. We can consider "upgrading"
  // L >= 0 to use isKnownPredicate later if needed.
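  //
  // A sketch of the split (with assumed names): to show "I u< N" where N is
  // known non-negative, it suffices to show "I s>= 0" and "I s< N", which is
  // exactly the conjunction checked below.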
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the
  // stack -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into
  // an infinite loop as we walk up into the dom tree. These loops do not
  // matter anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {

    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge. This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within
      // the loop body that dominate the latch. The dominator tree had better
      // agree with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  // Starting at the loop predecessor, climb up the predecessor chain, as
  // long as there are predecessors that can be found that have unique
  // successors leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
           Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(Pred, LHS, RHS,
                      LoopEntryPredicate->getCondition(),
                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
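  // E.g. (illustrative): if the guarding branch is "br (and A, B)", then on
  // the taken edge both A and B hold, so it is enough that either conjunct
  // alone implies the queried predicate; "or" plays the dual role for the
  // inverted case.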
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We have found a conditional branch that dominates the loop or controls
  // the loop latch. Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
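  // (Illustrative: knowing "A u< B" with both A and B known non-negative is
  // as good as knowing "A s< B", since the two orderings agree on
  // non-negative values.)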
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
        case ICmpInst::ICMP_SGE:
        case ICmpInst::ICMP_UGE:
          // We know V `Pred` SharperMin. If this implies LHS `Pred`
          // RHS, we're done.
          if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                    getConstant(SharperMin)))
            return true;
          LLVM_FALLTHROUGH;

        case ICmpInst::ICMP_SGT:
        case ICmpInst::ICMP_UGT:
          // We know from the range information that (V `Pred` Min ||
          // V == Min). We know from the guarding condition that !(V
          // == Min). This gives us
          //
          //   V `Pred` Min || V == Min && !(V == Min)
          //   => V `Pred` Min
          //
          // If V `Pred` Min implies LHS `Pred` RHS, we're done.

          if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
            return true;
          LLVM_FALLTHROUGH;

        default:
          // No change
          break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).
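  //
  // Sketch of the intended contract (illustrative values): for X + 5 and X
  // the result is 5; for {2,+,1} and {0,+,1} over the same loop it is 2;
  // structurally unrelated operands yield None.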

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only, not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  const SCEV *L, *R;
  SCEV::NoWrapFlags Flags;
  if (splitBinaryAdd(Less, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == More)
        return -(LC->getAPInt());

  if (splitBinaryAdd(More, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == Less)
        return LC->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop. This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  //   <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //   <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //   <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
  //       (FoundRHS + INT_MIN + C + INT_MIN)                   [ using (3) ]
  //   <=> FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //     (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //     (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128),
  // FoundRHS = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and
  // FoundRHS s< (INT_MIN - C). Lack of sign overflow / underflow in
  // "FoundRHS + C" is neither necessary nor sufficient to prove
  // "(FoundLHS + C) s< (FoundRHS + C)".
9059 9060 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9061 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9062 if (!LDiff || !RDiff || *LDiff != *RDiff) 9063 return false; 9064 9065 if (LDiff->isMinValue()) 9066 return true; 9067 9068 APInt FoundRHSLimit; 9069 9070 if (Pred == CmpInst::ICMP_ULT) { 9071 FoundRHSLimit = -(*RDiff); 9072 } else { 9073 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9074 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9075 } 9076 9077 // Try to prove (1) or (2), as needed. 9078 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 9079 getConstant(FoundRHSLimit)); 9080 } 9081 9082 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 9083 const SCEV *LHS, const SCEV *RHS, 9084 const SCEV *FoundLHS, 9085 const SCEV *FoundRHS) { 9086 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9087 return true; 9088 9089 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9090 return true; 9091 9092 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 9093 FoundLHS, FoundRHS) || 9094 // ~x < ~y --> x > y 9095 isImpliedCondOperandsHelper(Pred, LHS, RHS, 9096 getNotSCEV(FoundRHS), 9097 getNotSCEV(FoundLHS)); 9098 } 9099 9100 9101 /// If Expr computes ~A, return A else return nullptr 9102 static const SCEV *MatchNotExpr(const SCEV *Expr) { 9103 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 9104 if (!Add || Add->getNumOperands() != 2 || 9105 !Add->getOperand(0)->isAllOnesValue()) 9106 return nullptr; 9107 9108 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 9109 if (!AddRHS || AddRHS->getNumOperands() != 2 || 9110 !AddRHS->getOperand(0)->isAllOnesValue()) 9111 return nullptr; 9112 9113 return AddRHS->getOperand(1); 9114 } 9115 9116 9117 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 9118 template<typename MaxExprType> 9119 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 9120 const SCEV *Candidate) { 9121 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 9122 if (!MaxExpr) return false; 9123 9124 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 9125 } 9126 9127 9128 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 9129 template<typename MaxExprType> 9130 static bool IsMinConsistingOf(ScalarEvolution &SE, 9131 const SCEV *MaybeMinExpr, 9132 const SCEV *Candidate) { 9133 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 9134 if (!MaybeMaxExpr) 9135 return false; 9136 9137 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 9138 } 9139 9140 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 9141 ICmpInst::Predicate Pred, 9142 const SCEV *LHS, const SCEV *RHS) { 9143 9144 // If both sides are affine addrecs for the same loop, with equal 9145 // steps, and we know the recurrences don't wrap, then we only 9146 // need to check the predicate on the starting values. 
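  // For example (illustrative): {5,+,4}<nsw><%L> s< {9,+,4}<nsw><%L> holds on
  // every iteration because both recurrences advance in lockstep without
  // wrapping, so the comparison reduces to the starts: 5 s< 9.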
9147
9148   if (!ICmpInst::isRelational(Pred))
9149     return false;
9150
9151   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
9152   if (!LAR)
9153     return false;
9154   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
9155   if (!RAR)
9156     return false;
9157   if (LAR->getLoop() != RAR->getLoop())
9158     return false;
9159   if (!LAR->isAffine() || !RAR->isAffine())
9160     return false;
9161
9162   if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
9163     return false;
9164
9165   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
9166                          SCEV::FlagNSW : SCEV::FlagNUW;
9167   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
9168     return false;
9169
9170   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
9171 }
9172
9173 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
9174 /// expression?
9175 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
9176                                         ICmpInst::Predicate Pred,
9177                                         const SCEV *LHS, const SCEV *RHS) {
9178   switch (Pred) {
9179   default:
9180     return false;
9181
9182   case ICmpInst::ICMP_SGE:
9183     std::swap(LHS, RHS);
9184     LLVM_FALLTHROUGH;
9185   case ICmpInst::ICMP_SLE:
9186     return
9187         // min(A, ...) <= A
9188         IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
9189         // A <= max(A, ...)
9190         IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
9191
9192   case ICmpInst::ICMP_UGE:
9193     std::swap(LHS, RHS);
9194     LLVM_FALLTHROUGH;
9195   case ICmpInst::ICMP_ULE:
9196     return
9197         // min(A, ...) <= A
9198         IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
9199         // A <= max(A, ...)
9200         IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
9201   }
9202
9203   llvm_unreachable("covered switch fell through?!");
9204 }
9205
9206 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
9207                                              const SCEV *LHS, const SCEV *RHS,
9208                                              const SCEV *FoundLHS,
9209                                              const SCEV *FoundRHS,
9210                                              unsigned Depth) {
9211   assert(getTypeSizeInBits(LHS->getType()) ==
9212              getTypeSizeInBits(RHS->getType()) &&
9213          "LHS and RHS have different sizes?");
9214   assert(getTypeSizeInBits(FoundLHS->getType()) ==
9215              getTypeSizeInBits(FoundRHS->getType()) &&
9216          "FoundLHS and FoundRHS have different sizes?");
9217   // We want to avoid hurting the compile time with analysis of too big trees.
9218   if (Depth > MaxSCEVOperationsImplicationDepth)
9219     return false;
9220   // We only want to work with the ICMP_SGT comparison so far.
9221   // TODO: Extend to ICMP_UGT?
9222   if (Pred == ICmpInst::ICMP_SLT) {
9223     Pred = ICmpInst::ICMP_SGT;
9224     std::swap(LHS, RHS);
9225     std::swap(FoundLHS, FoundRHS);
9226   }
9227   if (Pred != ICmpInst::ICMP_SGT)
9228     return false;
9229
9230   auto GetOpFromSExt = [&](const SCEV *S) {
9231     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
9232       return Ext->getOperand();
9233     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
9234     // the constant in some cases.
9235     return S;
9236   };
9237
9238   // Acquire values from extensions.
9239   auto *OrigFoundLHS = FoundLHS;
9240   LHS = GetOpFromSExt(LHS);
9241   FoundLHS = GetOpFromSExt(FoundLHS);
9242
9243   // Check whether the SGT predicate can be proved trivially or using the found context.
9244   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
9245     return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
9246            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
9247                                   FoundRHS, Depth + 1);
9248   };
9249
9250   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
9251     // We want to avoid creation of any new non-constant SCEV.
Since we are
9252     // going to compare the operands to RHS, we should be certain that we don't
9253     // need any size extensions for this. So let's decline all cases when the
9254     // sizes of the types of LHS and RHS do not match.
9255     // TODO: Maybe try to get RHS from sext to catch more cases?
9256     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
9257       return false;
9258
9259     // Should not overflow.
9260     if (!LHSAddExpr->hasNoSignedWrap())
9261       return false;
9262
9263     auto *LL = LHSAddExpr->getOperand(0);
9264     auto *LR = LHSAddExpr->getOperand(1);
9265     auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
9266
9267     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
9268     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
9269       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
9270     };
9271     // Try to prove the following rule:
9272     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
9273     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
9274     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
9275       return true;
9276   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
9277     Value *LL, *LR;
9278     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
9279     using namespace llvm::PatternMatch;
9280     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
9281       // Rules for division.
9282       // We are going to perform some comparisons with Denominator and its
9283       // derivative expressions. In the general case, creating a SCEV for it may
9284       // lead to a complex analysis of the entire graph, and in particular it
9285       // can request trip count recalculation for the same loop. That result
9286       // would then be cached as SCEVCouldNotCompute to avoid infinite recursion.
9287       // To avoid this, we only want to create SCEVs that are constants in this
9288       // section. So we bail if Denominator is not a constant.
9289       if (!isa<ConstantInt>(LR))
9290         return false;
9291
9292       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
9293
9294       // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
9295       // then a SCEV for the numerator already exists and matches with FoundLHS.
9296       auto *Numerator = getExistingSCEV(LL);
9297       if (!Numerator || Numerator->getType() != FoundLHS->getType())
9298         return false;
9299
9300       // Make sure that the numerator matches with FoundLHS and the denominator
9301       // is positive.
9302       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
9303         return false;
9304
9305       auto *DTy = Denominator->getType();
9306       auto *FRHSTy = FoundRHS->getType();
9307       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
9308         // One of the types is a pointer and the other one is not. We cannot
9309         // extend them properly to a wider type, so let us just reject this case.
9310         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
9311         // to avoid this check.
9312         return false;
9313
9314       // Given that:
9315       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
9316       auto *WTy = getWiderType(DTy, FRHSTy);
9317       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
9318       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
9319
9320       // Try to prove the following rule:
9321       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
9322       // For example, given that FoundLHS > 2, FoundLHS is at
9323       // least 3. If we divide it by a Denominator < 4, we get at least 1.
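      // Worked instance of the rule above (illustrative numbers): take
      // Denominator = 4, so DenomMinusTwo = 2 below. If FoundRHS > 2 then
      // FoundLHS >= 4 (since FoundLHS > FoundRHS), hence
      // LHS = FoundLHS / 4 >= 1, which exceeds any RHS <= 0.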
9324       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
9325       if (isKnownNonPositive(RHS) &&
9326           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
9327         return true;
9328
9329       // Try to prove the following rule:
9330       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
9331       // For example, given that FoundLHS > -3, FoundLHS is at least -2.
9332       // If we divide it by Denominator > 2, then:
9333       // 1. If FoundLHS is negative, then the result is 0.
9334       // 2. If FoundLHS is non-negative, then the result is non-negative.
9335       // Either way, the result is non-negative.
9336       auto *MinusOne = getNegativeSCEV(getOne(WTy));
9337       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
9338       if (isKnownNegative(RHS) &&
9339           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
9340         return true;
9341     }
9342   }
9343
9344   return false;
9345 }
9346
9347 bool
9348 ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
9349                                            const SCEV *LHS, const SCEV *RHS) {
9350   return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
9351          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
9352          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
9353          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
9354 }
9355
9356 bool
9357 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
9358                                              const SCEV *LHS, const SCEV *RHS,
9359                                              const SCEV *FoundLHS,
9360                                              const SCEV *FoundRHS) {
9361   switch (Pred) {
9362   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
9363   case ICmpInst::ICMP_EQ:
9364   case ICmpInst::ICMP_NE:
9365     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
9366       return true;
9367     break;
9368   case ICmpInst::ICMP_SLT:
9369   case ICmpInst::ICMP_SLE:
9370     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
9371         isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
9372       return true;
9373     break;
9374   case ICmpInst::ICMP_SGT:
9375   case ICmpInst::ICMP_SGE:
9376     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
9377         isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
9378       return true;
9379     break;
9380   case ICmpInst::ICMP_ULT:
9381   case ICmpInst::ICMP_ULE:
9382     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
9383         isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
9384       return true;
9385     break;
9386   case ICmpInst::ICMP_UGT:
9387   case ICmpInst::ICMP_UGE:
9388     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
9389         isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
9390       return true;
9391     break;
9392   }
9393
9394   // Maybe it can be proved via operations?
9395   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
9396     return true;
9397
9398   return false;
9399 }
9400
9401 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
9402                                                      const SCEV *LHS,
9403                                                      const SCEV *RHS,
9404                                                      const SCEV *FoundLHS,
9405                                                      const SCEV *FoundRHS) {
9406   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
9407     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
9408     // reduce the compile time impact of this optimization.
9409     return false;
9410
9411   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
9412   if (!Addend)
9413     return false;
9414
9415   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
9416
9417   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
9418   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
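  // For instance (illustrative): with Pred == ICMP_ULT and ConstFoundRHS == 8,
  // makeAllowedICmpRegion below yields FoundLHSRange == [0, 8).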
9419 ConstantRange FoundLHSRange = 9420 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9421 9422 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9423 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9424 9425 // We can also compute the range of values for `LHS` that satisfy the 9426 // consequent, "`LHS` `Pred` `RHS`": 9427 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9428 ConstantRange SatisfyingLHSRange = 9429 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9430 9431 // The antecedent implies the consequent if every value of `LHS` that 9432 // satisfies the antecedent also satisfies the consequent. 9433 return SatisfyingLHSRange.contains(LHSRange); 9434 } 9435 9436 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9437 bool IsSigned, bool NoWrap) { 9438 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9439 9440 if (NoWrap) return false; 9441 9442 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9443 const SCEV *One = getOne(Stride->getType()); 9444 9445 if (IsSigned) { 9446 APInt MaxRHS = getSignedRangeMax(RHS); 9447 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9448 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9449 9450 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9451 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9452 } 9453 9454 APInt MaxRHS = getUnsignedRangeMax(RHS); 9455 APInt MaxValue = APInt::getMaxValue(BitWidth); 9456 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9457 9458 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9459 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9460 } 9461 9462 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9463 bool IsSigned, bool NoWrap) { 9464 if (NoWrap) return false; 9465 9466 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9467 const SCEV *One = getOne(Stride->getType()); 9468 9469 if (IsSigned) { 9470 APInt MinRHS = getSignedRangeMin(RHS); 9471 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9472 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9473 9474 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9475 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9476 } 9477 9478 APInt MinRHS = getUnsignedRangeMin(RHS); 9479 APInt MinValue = APInt::getMinValue(BitWidth); 9480 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9481 9482 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9483 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9484 } 9485 9486 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9487 bool Equality) { 9488 const SCEV *One = getOne(Step->getType()); 9489 Delta = Equality ? 
getAddExpr(Delta, Step)
9490                            : getAddExpr(Delta, getMinusSCEV(Step, One));
9491   return getUDivExpr(Delta, Step);
9492 }
9493
9494 ScalarEvolution::ExitLimit
9495 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
9496                                   const Loop *L, bool IsSigned,
9497                                   bool ControlsExit, bool AllowPredicates) {
9498   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9499   // We handle only IV < Invariant
9500   if (!isLoopInvariant(RHS, L))
9501     return getCouldNotCompute();
9502
9503   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
9504   bool PredicatedIV = false;
9505
9506   if (!IV && AllowPredicates) {
9507     // Try to make this an AddRec using runtime tests, in the first X
9508     // iterations of this loop, where X is the SCEV expression found by the
9509     // algorithm below.
9510     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
9511     PredicatedIV = true;
9512   }
9513
9514   // Avoid weird loops
9515   if (!IV || IV->getLoop() != L || !IV->isAffine())
9516     return getCouldNotCompute();
9517
9518   bool NoWrap = ControlsExit &&
9519                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
9520
9521   const SCEV *Stride = IV->getStepRecurrence(*this);
9522
9523   bool PositiveStride = isKnownPositive(Stride);
9524
9525   // Avoid negative or zero stride values.
9526   if (!PositiveStride) {
9527     // We can compute the correct backedge taken count for loops with unknown
9528     // strides if we can prove that the loop is not an infinite loop with side
9529     // effects. Here's the loop structure we are trying to handle -
9530     //
9531     // i = start
9532     // do {
9533     //   A[i] = i;
9534     //   i += s;
9535     // } while (i < end);
9536     //
9537     // The backedge taken count for such loops is evaluated as -
9538     // (max(end, start + stride) - start - 1) /u stride
9539     //
9540     // The additional preconditions that we need to check to prove correctness
9541     // of the above formula are as follows -
9542     //
9543     // a) IV is either nuw or nsw depending upon signedness (indicated by the
9544     //    NoWrap flag).
9545     // b) loop is single exit with no side effects.
9546     //
9547     //
9548     // Precondition a) implies that if the stride is negative, this is a single
9549     // trip loop. The backedge taken count formula reduces to zero in this case.
9550     //
9551     // Precondition b) implies that the unknown stride cannot be zero; otherwise
9552     // we have UB.
9553     //
9554     // The positive stride case is the same as isKnownPositive(Stride) returning
9555     // true (original behavior of the function).
9556     //
9557     // We want to make sure that the stride is truly unknown as there are edge
9558     // cases where ScalarEvolution propagates no wrap flags to the
9559     // post-increment/decrement IV even though the increment/decrement operation
9560     // itself is wrapping. The computed backedge taken count may be wrong in
9561     // such cases. This is prevented by checking that the stride is not known to
9562     // be either positive or non-positive. For example, no wrap flags are
9563     // propagated to the post-increment IV of this loop with a trip count of 2 -
9564     //
9565     // unsigned char i;
9566     // for(i=127; i<128; i+=129)
9567     //   A[i] = i;
9568     //
9569     if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
9570         !loopHasNoSideEffects(L))
9571       return getCouldNotCompute();
9572
9573   } else if (!Stride->isOne() &&
9574              doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
9575     // Avoid proven overflow cases: this will ensure that the backedge taken
9576     // count will not generate any unsigned overflow.
Relaxed no-overflow 9577 // conditions exploit NoWrapFlags, allowing to optimize in presence of 9578 // undefined behaviors like the case of C language. 9579 return getCouldNotCompute(); 9580 9581 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 9582 : ICmpInst::ICMP_ULT; 9583 const SCEV *Start = IV->getStart(); 9584 const SCEV *End = RHS; 9585 // If the backedge is taken at least once, then it will be taken 9586 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 9587 // is the LHS value of the less-than comparison the first time it is evaluated 9588 // and End is the RHS. 9589 const SCEV *BECountIfBackedgeTaken = 9590 computeBECount(getMinusSCEV(End, Start), Stride, false); 9591 // If the loop entry is guarded by the result of the backedge test of the 9592 // first loop iteration, then we know the backedge will be taken at least 9593 // once and so the backedge taken count is as above. If not then we use the 9594 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 9595 // as if the backedge is taken at least once max(End,Start) is End and so the 9596 // result is as above, and if not max(End,Start) is Start so we get a backedge 9597 // count of zero. 9598 const SCEV *BECount; 9599 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 9600 BECount = BECountIfBackedgeTaken; 9601 else { 9602 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 9603 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 9604 } 9605 9606 const SCEV *MaxBECount; 9607 bool MaxOrZero = false; 9608 if (isa<SCEVConstant>(BECount)) 9609 MaxBECount = BECount; 9610 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 9611 // If we know exactly how many times the backedge will be taken if it's 9612 // taken at least once, then the backedge count will either be that or 9613 // zero. 9614 MaxBECount = BECountIfBackedgeTaken; 9615 MaxOrZero = true; 9616 } else { 9617 // Calculate the maximum backedge count based on the range of values 9618 // permitted by Start, End, and Stride. 9619 APInt MinStart = IsSigned ? getSignedRangeMin(Start) 9620 : getUnsignedRangeMin(Start); 9621 9622 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 9623 9624 APInt StrideForMaxBECount; 9625 9626 if (PositiveStride) 9627 StrideForMaxBECount = 9628 IsSigned ? getSignedRangeMin(Stride) 9629 : getUnsignedRangeMin(Stride); 9630 else 9631 // Using a stride of 1 is safe when computing max backedge taken count for 9632 // a loop with unknown stride. 9633 StrideForMaxBECount = APInt(BitWidth, 1, IsSigned); 9634 9635 APInt Limit = 9636 IsSigned ? APInt::getSignedMaxValue(BitWidth) - (StrideForMaxBECount - 1) 9637 : APInt::getMaxValue(BitWidth) - (StrideForMaxBECount - 1); 9638 9639 // Although End can be a MAX expression we estimate MaxEnd considering only 9640 // the case End = RHS. This is safe because in the other case (End - Start) 9641 // is zero, leading to a zero maximum backedge taken count. 9642 APInt MaxEnd = 9643 IsSigned ? 
APIntOps::smin(getSignedRangeMax(RHS), Limit) 9644 : APIntOps::umin(getUnsignedRangeMax(RHS), Limit); 9645 9646 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart), 9647 getConstant(StrideForMaxBECount), false); 9648 } 9649 9650 if (isa<SCEVCouldNotCompute>(MaxBECount) && 9651 !isa<SCEVCouldNotCompute>(BECount)) 9652 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 9653 9654 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 9655 } 9656 9657 ScalarEvolution::ExitLimit 9658 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 9659 const Loop *L, bool IsSigned, 9660 bool ControlsExit, bool AllowPredicates) { 9661 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 9662 // We handle only IV > Invariant 9663 if (!isLoopInvariant(RHS, L)) 9664 return getCouldNotCompute(); 9665 9666 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 9667 if (!IV && AllowPredicates) 9668 // Try to make this an AddRec using runtime tests, in the first X 9669 // iterations of this loop, where X is the SCEV expression found by the 9670 // algorithm below. 9671 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 9672 9673 // Avoid weird loops 9674 if (!IV || IV->getLoop() != L || !IV->isAffine()) 9675 return getCouldNotCompute(); 9676 9677 bool NoWrap = ControlsExit && 9678 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 9679 9680 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 9681 9682 // Avoid negative or zero stride values 9683 if (!isKnownPositive(Stride)) 9684 return getCouldNotCompute(); 9685 9686 // Avoid proven overflow cases: this will ensure that the backedge taken count 9687 // will not generate any unsigned overflow. Relaxed no-overflow conditions 9688 // exploit NoWrapFlags, allowing to optimize in presence of undefined 9689 // behaviors like the case of C language. 9690 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 9691 return getCouldNotCompute(); 9692 9693 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 9694 : ICmpInst::ICMP_UGT; 9695 9696 const SCEV *Start = IV->getStart(); 9697 const SCEV *End = RHS; 9698 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) 9699 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 9700 9701 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 9702 9703 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 9704 : getUnsignedRangeMax(Start); 9705 9706 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 9707 : getUnsignedRangeMin(Stride); 9708 9709 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 9710 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 9711 : APInt::getMinValue(BitWidth) + (MinStride - 1); 9712 9713 // Although End can be a MIN expression we estimate MinEnd considering only 9714 // the case End = RHS. This is safe because in the other case (Start - End) 9715 // is zero, leading to a zero maximum backedge taken count. 9716 APInt MinEnd = 9717 IsSigned ? 
APIntOps::smax(getSignedRangeMin(RHS), Limit) 9718 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 9719 9720 9721 const SCEV *MaxBECount = getCouldNotCompute(); 9722 if (isa<SCEVConstant>(BECount)) 9723 MaxBECount = BECount; 9724 else 9725 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 9726 getConstant(MinStride), false); 9727 9728 if (isa<SCEVCouldNotCompute>(MaxBECount)) 9729 MaxBECount = BECount; 9730 9731 return ExitLimit(BECount, MaxBECount, false, Predicates); 9732 } 9733 9734 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 9735 ScalarEvolution &SE) const { 9736 if (Range.isFullSet()) // Infinite loop. 9737 return SE.getCouldNotCompute(); 9738 9739 // If the start is a non-zero constant, shift the range to simplify things. 9740 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 9741 if (!SC->getValue()->isZero()) { 9742 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 9743 Operands[0] = SE.getZero(SC->getType()); 9744 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 9745 getNoWrapFlags(FlagNW)); 9746 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 9747 return ShiftedAddRec->getNumIterationsInRange( 9748 Range.subtract(SC->getAPInt()), SE); 9749 // This is strange and shouldn't happen. 9750 return SE.getCouldNotCompute(); 9751 } 9752 9753 // The only time we can solve this is when we have all constant indices. 9754 // Otherwise, we cannot determine the overflow conditions. 9755 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 9756 return SE.getCouldNotCompute(); 9757 9758 // Okay at this point we know that all elements of the chrec are constants and 9759 // that the start element is zero. 9760 9761 // First check to see if the range contains zero. If not, the first 9762 // iteration exits. 9763 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 9764 if (!Range.contains(APInt(BitWidth, 0))) 9765 return SE.getZero(getType()); 9766 9767 if (isAffine()) { 9768 // If this is an affine expression then we have this situation: 9769 // Solve {0,+,A} in Range === Ax in Range 9770 9771 // We know that zero is in the range. If A is positive then we know that 9772 // the upper value of the range must be the first possible exit value. 9773 // If A is negative then the lower of the range is the last possible loop 9774 // value. Also note that we already checked for a full range. 9775 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 9776 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 9777 9778 // The exit value should be (End+A)/A. 9779 APInt ExitVal = (End + A).udiv(A); 9780 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 9781 9782 // Evaluate at the exit value. If we really did fall out of the valid 9783 // range, then we computed our trip count, otherwise wrap around or other 9784 // things must have happened. 9785 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 9786 if (Range.contains(Val->getValue())) 9787 return SE.getCouldNotCompute(); // Something strange happened 9788 9789 // Ensure that the previous value is in the range. This is a sanity check. 
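    // Worked example (illustrative): solving {0,+,3} in Range = [0, 10) gives
    // End = 9 and ExitVal = (9 + 3) /u 3 = 4; the chrec evaluates to 12 at
    // iteration 4 (outside Range) and to 9 at iteration 3 (inside), which is
    // what the assert below re-checks.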
9790     assert(Range.contains(
9791                EvaluateConstantChrecAtConstant(this,
9792                    ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
9793            "Linear scev computation is off in a bad way!");
9794     return SE.getConstant(ExitValue);
9795   } else if (isQuadratic()) {
9796     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
9797     // quadratic equation to solve it. To do this, we must frame our problem in
9798     // terms of figuring out when zero is crossed, instead of when
9799     // Range.getUpper() is crossed.
9800     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
9801     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
9802     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap);
9803
9804     // Next, solve the constructed addrec
9805     if (auto Roots =
9806             SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) {
9807       const SCEVConstant *R1 = Roots->first;
9808       const SCEVConstant *R2 = Roots->second;
9809       // Pick the smallest positive root value.
9810       if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
9811               ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
9812         if (!CB->getZExtValue())
9813           std::swap(R1, R2); // R1 is the minimum root now.
9814
9815         // Make sure the root is not off by one. The returned iteration should
9816         // not be in the range, but the previous one should be. When solving
9817         // for "X*X < 5", for example, we should not return a root of 2.
9818         ConstantInt *R1Val =
9819             EvaluateConstantChrecAtConstant(this, R1->getValue(), SE);
9820         if (Range.contains(R1Val->getValue())) {
9821           // The next iteration must be out of the range...
9822           ConstantInt *NextVal =
9823               ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);
9824
9825           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
9826           if (!Range.contains(R1Val->getValue()))
9827             return SE.getConstant(NextVal);
9828           return SE.getCouldNotCompute(); // Something strange happened
9829         }
9830
9831         // If R1 was not in the range, then it is a good return value. Make
9832         // sure that R1-1 WAS in the range though, just in case.
9833         ConstantInt *NextVal =
9834             ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
9835         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
9836         if (Range.contains(R1Val->getValue()))
9837           return R1;
9838         return SE.getCouldNotCompute(); // Something strange happened
9839       }
9840     }
9841   }
9842
9843   return SE.getCouldNotCompute();
9844 }
9845
9846 // Return true when S contains at least one undef value.
9847 static inline bool containsUndefs(const SCEV *S) {
9848   return SCEVExprContains(S, [](const SCEV *S) {
9849     if (const auto *SU = dyn_cast<SCEVUnknown>(S))
9850       return isa<UndefValue>(SU->getValue());
9851     else if (const auto *SC = dyn_cast<SCEVConstant>(S))
9852       return isa<UndefValue>(SC->getValue());
9853     return false;
9854   });
9855 }
9856
9857 namespace {
9858 // Collect all steps of SCEV expressions.
9859 struct SCEVCollectStrides {
9860   ScalarEvolution &SE;
9861   SmallVectorImpl<const SCEV *> &Strides;
9862
9863   SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
9864       : SE(SE), Strides(S) {}
9865
9866   bool follow(const SCEV *S) {
9867     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
9868       Strides.push_back(AR->getStepRecurrence(SE));
9869     return true;
9870   }
9871   bool isDone() const { return false; }
9872 };
9873
9874 // Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
9875 struct SCEVCollectTerms {
9876   SmallVectorImpl<const SCEV *> &Terms;
9877
9878   SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
9879       : Terms(T) {}
9880
9881   bool follow(const SCEV *S) {
9882     if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
9883         isa<SCEVSignExtendExpr>(S)) {
9884       if (!containsUndefs(S))
9885         Terms.push_back(S);
9886
9887       // Stop recursion: once we collected a term, do not walk its operands.
9888       return false;
9889     }
9890
9891     // Keep looking.
9892     return true;
9893   }
9894   bool isDone() const { return false; }
9895 };
9896
9897 // Check if a SCEV contains an AddRecExpr.
9898 struct SCEVHasAddRec {
9899   bool &ContainsAddRec;
9900
9901   SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
9902     ContainsAddRec = false;
9903   }
9904
9905   bool follow(const SCEV *S) {
9906     if (isa<SCEVAddRecExpr>(S)) {
9907       ContainsAddRec = true;
9908
9909       // Stop recursion: we found an AddRec, no need to walk its operands.
9910       return false;
9911     }
9912
9913     // Keep looking.
9914     return true;
9915   }
9916   bool isDone() const { return false; }
9917 };
9918
9919 // Find factors that are multiplied with an expression that (possibly as a
9920 // subexpression) contains an AddRecExpr. In the expression:
9921 //
9922 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
9923 //
9924 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
9925 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
9926 // parameters as they form a product with an induction variable.
9927 //
9928 // This collector expects all array size parameters to be in the same MulExpr.
9929 // It might be necessary to later add support for collecting parameters that are
9930 // spread over different nested MulExpr.
9931 struct SCEVCollectAddRecMultiplies {
9932   SmallVectorImpl<const SCEV *> &Terms;
9933   ScalarEvolution &SE;
9934
9935   SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
9936       : Terms(T), SE(SE) {}
9937
9938   bool follow(const SCEV *S) {
9939     if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
9940       bool HasAddRec = false;
9941       SmallVector<const SCEV *, 0> Operands;
9942       for (auto Op : Mul->operands()) {
9943         const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
9944         if (Unknown && !isa<CallInst>(Unknown->getValue())) {
9945           Operands.push_back(Op);
9946         } else if (Unknown) {
9947           HasAddRec = true;
9948         } else {
9949           bool ContainsAddRec;
9950           SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
9951           visitAll(Op, ContainsAddRecVisitor);
9952           HasAddRec |= ContainsAddRec;
9953         }
9954       }
9955       if (Operands.size() == 0)
9956         return true;
9957
9958       if (!HasAddRec)
9959         return false;
9960
9961       Terms.push_back(SE.getMulExpr(Operands));
9962       // Stop recursion: once we collected a term, do not walk its operands.
9963       return false;
9964     }
9965
9966     // Keep looking.
9967     return true;
9968   }
9969   bool isDone() const { return false; }
9970 };
9971 }
9972
9973 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
9974 /// two places:
9975 /// 1) The strides of AddRec expressions.
9976 /// 2) Unknowns that are multiplied with AddRec expressions.
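/// For example (illustrative): given {%a,+,(4 * %n)}<%L>, case 1) collects the
/// stride (4 * %n), which is kept as a term; and given (%p * {0,+,1}<%L>),
/// case 2) collects %p because it is multiplied with an AddRec.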
9977 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 9978 SmallVectorImpl<const SCEV *> &Terms) { 9979 SmallVector<const SCEV *, 4> Strides; 9980 SCEVCollectStrides StrideCollector(*this, Strides); 9981 visitAll(Expr, StrideCollector); 9982 9983 DEBUG({ 9984 dbgs() << "Strides:\n"; 9985 for (const SCEV *S : Strides) 9986 dbgs() << *S << "\n"; 9987 }); 9988 9989 for (const SCEV *S : Strides) { 9990 SCEVCollectTerms TermCollector(Terms); 9991 visitAll(S, TermCollector); 9992 } 9993 9994 DEBUG({ 9995 dbgs() << "Terms:\n"; 9996 for (const SCEV *T : Terms) 9997 dbgs() << *T << "\n"; 9998 }); 9999 10000 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10001 visitAll(Expr, MulCollector); 10002 } 10003 10004 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10005 SmallVectorImpl<const SCEV *> &Terms, 10006 SmallVectorImpl<const SCEV *> &Sizes) { 10007 int Last = Terms.size() - 1; 10008 const SCEV *Step = Terms[Last]; 10009 10010 // End of recursion. 10011 if (Last == 0) { 10012 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10013 SmallVector<const SCEV *, 2> Qs; 10014 for (const SCEV *Op : M->operands()) 10015 if (!isa<SCEVConstant>(Op)) 10016 Qs.push_back(Op); 10017 10018 Step = SE.getMulExpr(Qs); 10019 } 10020 10021 Sizes.push_back(Step); 10022 return true; 10023 } 10024 10025 for (const SCEV *&Term : Terms) { 10026 // Normalize the terms before the next call to findArrayDimensionsRec. 10027 const SCEV *Q, *R; 10028 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10029 10030 // Bail out when GCD does not evenly divide one of the terms. 10031 if (!R->isZero()) 10032 return false; 10033 10034 Term = Q; 10035 } 10036 10037 // Remove all SCEVConstants. 10038 Terms.erase( 10039 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10040 Terms.end()); 10041 10042 if (Terms.size() > 0) 10043 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10044 return false; 10045 10046 Sizes.push_back(Step); 10047 return true; 10048 } 10049 10050 10051 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10052 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 10053 for (const SCEV *T : Terms) 10054 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 10055 return true; 10056 return false; 10057 } 10058 10059 // Return the number of product terms in S. 10060 static inline int numberOfTerms(const SCEV *S) { 10061 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 10062 return Expr->getNumOperands(); 10063 return 1; 10064 } 10065 10066 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 10067 if (isa<SCEVConstant>(T)) 10068 return nullptr; 10069 10070 if (isa<SCEVUnknown>(T)) 10071 return T; 10072 10073 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 10074 SmallVector<const SCEV *, 2> Factors; 10075 for (const SCEV *Op : M->operands()) 10076 if (!isa<SCEVConstant>(Op)) 10077 Factors.push_back(Op); 10078 10079 return SE.getMulExpr(Factors); 10080 } 10081 10082 return T; 10083 } 10084 10085 /// Return the size of an element read or written by Inst. 
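/// For example, for a load or store of i32 this is the SCEV constant 4,
/// expressed in the effective (pointer-width) integer type; instructions that
/// are not loads or stores yield nullptr.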
10086 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 10087 Type *Ty; 10088 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 10089 Ty = Store->getValueOperand()->getType(); 10090 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 10091 Ty = Load->getType(); 10092 else 10093 return nullptr; 10094 10095 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 10096 return getSizeOfExpr(ETy, Ty); 10097 } 10098 10099 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 10100 SmallVectorImpl<const SCEV *> &Sizes, 10101 const SCEV *ElementSize) { 10102 if (Terms.size() < 1 || !ElementSize) 10103 return; 10104 10105 // Early return when Terms do not contain parameters: we do not delinearize 10106 // non parametric SCEVs. 10107 if (!containsParameters(Terms)) 10108 return; 10109 10110 DEBUG({ 10111 dbgs() << "Terms:\n"; 10112 for (const SCEV *T : Terms) 10113 dbgs() << *T << "\n"; 10114 }); 10115 10116 // Remove duplicates. 10117 array_pod_sort(Terms.begin(), Terms.end()); 10118 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 10119 10120 // Put larger terms first. 10121 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 10122 return numberOfTerms(LHS) > numberOfTerms(RHS); 10123 }); 10124 10125 // Try to divide all terms by the element size. If term is not divisible by 10126 // element size, proceed with the original term. 10127 for (const SCEV *&Term : Terms) { 10128 const SCEV *Q, *R; 10129 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 10130 if (!Q->isZero()) 10131 Term = Q; 10132 } 10133 10134 SmallVector<const SCEV *, 4> NewTerms; 10135 10136 // Remove constant factors. 10137 for (const SCEV *T : Terms) 10138 if (const SCEV *NewT = removeConstantFactors(*this, T)) 10139 NewTerms.push_back(NewT); 10140 10141 DEBUG({ 10142 dbgs() << "Terms after sorting:\n"; 10143 for (const SCEV *T : NewTerms) 10144 dbgs() << *T << "\n"; 10145 }); 10146 10147 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 10148 Sizes.clear(); 10149 return; 10150 } 10151 10152 // The last element to be pushed into Sizes is the size of an element. 10153 Sizes.push_back(ElementSize); 10154 10155 DEBUG({ 10156 dbgs() << "Sizes:\n"; 10157 for (const SCEV *S : Sizes) 10158 dbgs() << *S << "\n"; 10159 }); 10160 } 10161 10162 void ScalarEvolution::computeAccessFunctions( 10163 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 10164 SmallVectorImpl<const SCEV *> &Sizes) { 10165 10166 // Early exit in case this SCEV is not an affine multivariate function. 10167 if (Sizes.empty()) 10168 return; 10169 10170 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 10171 if (!AR->isAffine()) 10172 return; 10173 10174 const SCEV *Res = Expr; 10175 int Last = Sizes.size() - 1; 10176 for (int i = Last; i >= 0; i--) { 10177 const SCEV *Q, *R; 10178 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 10179 10180 DEBUG({ 10181 dbgs() << "Res: " << *Res << "\n"; 10182 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 10183 dbgs() << "Res divided by Sizes[i]:\n"; 10184 dbgs() << "Quotient: " << *Q << "\n"; 10185 dbgs() << "Remainder: " << *R << "\n"; 10186 }); 10187 10188 Res = Q; 10189 10190 // Do not record the last subscript corresponding to the size of elements in 10191 // the array. 10192 if (i == Last) { 10193 10194 // Bail out if the remainder is too complex. 
10195 if (isa<SCEVAddRecExpr>(R)) { 10196 Subscripts.clear(); 10197 Sizes.clear(); 10198 return; 10199 } 10200 10201 continue; 10202 } 10203 10204 // Record the access function for the current subscript. 10205 Subscripts.push_back(R); 10206 } 10207 10208 // Also push in last position the remainder of the last division: it will be 10209 // the access function of the innermost dimension. 10210 Subscripts.push_back(Res); 10211 10212 std::reverse(Subscripts.begin(), Subscripts.end()); 10213 10214 DEBUG({ 10215 dbgs() << "Subscripts:\n"; 10216 for (const SCEV *S : Subscripts) 10217 dbgs() << *S << "\n"; 10218 }); 10219 } 10220 10221 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 10222 /// sizes of an array access. Returns the remainder of the delinearization that 10223 /// is the offset start of the array. The SCEV->delinearize algorithm computes 10224 /// the multiples of SCEV coefficients: that is a pattern matching of sub 10225 /// expressions in the stride and base of a SCEV corresponding to the 10226 /// computation of a GCD (greatest common divisor) of base and stride. When 10227 /// SCEV->delinearize fails, it returns the SCEV unchanged. 10228 /// 10229 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 10230 /// 10231 /// void foo(long n, long m, long o, double A[n][m][o]) { 10232 /// 10233 /// for (long i = 0; i < n; i++) 10234 /// for (long j = 0; j < m; j++) 10235 /// for (long k = 0; k < o; k++) 10236 /// A[i][j][k] = 1.0; 10237 /// } 10238 /// 10239 /// the delinearization input is the following AddRec SCEV: 10240 /// 10241 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 10242 /// 10243 /// From this SCEV, we are able to say that the base offset of the access is %A 10244 /// because it appears as an offset that does not divide any of the strides in 10245 /// the loops: 10246 /// 10247 /// CHECK: Base offset: %A 10248 /// 10249 /// and then SCEV->delinearize determines the size of some of the dimensions of 10250 /// the array as these are the multiples by which the strides are happening: 10251 /// 10252 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 10253 /// 10254 /// Note that the outermost dimension remains of UnknownSize because there are 10255 /// no strides that would help identifying the size of the last dimension: when 10256 /// the array has been statically allocated, one could compute the size of that 10257 /// dimension by dividing the overall size of the array by the size of the known 10258 /// dimensions: %m * %o * 8. 10259 /// 10260 /// Finally delinearize provides the access functions for the array reference 10261 /// that does correspond to A[i][j][k] of the above C testcase: 10262 /// 10263 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 10264 /// 10265 /// The testcases are checking the output of a function pass: 10266 /// DelinearizationPass that walks through all loads and stores of a function 10267 /// asking for the SCEV of the memory access with respect to all enclosing 10268 /// loops, calling SCEV->delinearize on that and printing the results. 10269 10270 void ScalarEvolution::delinearize(const SCEV *Expr, 10271 SmallVectorImpl<const SCEV *> &Subscripts, 10272 SmallVectorImpl<const SCEV *> &Sizes, 10273 const SCEV *ElementSize) { 10274 // First step: collect parametric terms. 
10275 SmallVector<const SCEV *, 4> Terms; 10276 collectParametricTerms(Expr, Terms); 10277 10278 if (Terms.empty()) 10279 return; 10280 10281 // Second step: find subscript sizes. 10282 findArrayDimensions(Terms, Sizes, ElementSize); 10283 10284 if (Sizes.empty()) 10285 return; 10286 10287 // Third step: compute the access functions for each subscript. 10288 computeAccessFunctions(Expr, Subscripts, Sizes); 10289 10290 if (Subscripts.empty()) 10291 return; 10292 10293 DEBUG({ 10294 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 10295 dbgs() << "ArrayDecl[UnknownSize]"; 10296 for (const SCEV *S : Sizes) 10297 dbgs() << "[" << *S << "]"; 10298 10299 dbgs() << "\nArrayRef"; 10300 for (const SCEV *S : Subscripts) 10301 dbgs() << "[" << *S << "]"; 10302 dbgs() << "\n"; 10303 }); 10304 } 10305 10306 //===----------------------------------------------------------------------===// 10307 // SCEVCallbackVH Class Implementation 10308 //===----------------------------------------------------------------------===// 10309 10310 void ScalarEvolution::SCEVCallbackVH::deleted() { 10311 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10312 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 10313 SE->ConstantEvolutionLoopExitValue.erase(PN); 10314 SE->eraseValueFromMap(getValPtr()); 10315 // this now dangles! 10316 } 10317 10318 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 10319 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10320 10321 // Forget all the expressions associated with users of the old value, 10322 // so that future queries will recompute the expressions using the new 10323 // value. 10324 Value *Old = getValPtr(); 10325 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 10326 SmallPtrSet<User *, 8> Visited; 10327 while (!Worklist.empty()) { 10328 User *U = Worklist.pop_back_val(); 10329 // Deleting the Old value will cause this to dangle. Postpone 10330 // that until everything else is done. 10331 if (U == Old) 10332 continue; 10333 if (!Visited.insert(U).second) 10334 continue; 10335 if (PHINode *PN = dyn_cast<PHINode>(U)) 10336 SE->ConstantEvolutionLoopExitValue.erase(PN); 10337 SE->eraseValueFromMap(U); 10338 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 10339 } 10340 // Delete the Old value. 10341 if (PHINode *PN = dyn_cast<PHINode>(Old)) 10342 SE->ConstantEvolutionLoopExitValue.erase(PN); 10343 SE->eraseValueFromMap(Old); 10344 // this now dangles! 10345 } 10346 10347 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 10348 : CallbackVH(V), SE(se) {} 10349 10350 //===----------------------------------------------------------------------===// 10351 // ScalarEvolution Class Implementation 10352 //===----------------------------------------------------------------------===// 10353 10354 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 10355 AssumptionCache &AC, DominatorTree &DT, 10356 LoopInfo &LI) 10357 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 10358 CouldNotCompute(new SCEVCouldNotCompute()), 10359 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10360 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 10361 FirstUnknown(nullptr) { 10362 10363 // To use guards for proving predicates, we need to scan every instruction in 10364 // relevant basic blocks, and not just terminators. 
Doing this is a waste of 10365 // time if the IR does not actually contain any calls to 10366 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 10367 // 10368 // This pessimizes the case where a pass that preserves ScalarEvolution wants 10369 // to _add_ guards to the module when there weren't any before, and wants 10370 // ScalarEvolution to optimize based on those guards. For now we prefer to be 10371 // efficient in lieu of being smart in that rather obscure case. 10372 10373 auto *GuardDecl = F.getParent()->getFunction( 10374 Intrinsic::getName(Intrinsic::experimental_guard)); 10375 HasGuards = GuardDecl && !GuardDecl->use_empty(); 10376 } 10377 10378 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 10379 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 10380 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 10381 ValueExprMap(std::move(Arg.ValueExprMap)), 10382 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 10383 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10384 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 10385 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 10386 PredicatedBackedgeTakenCounts( 10387 std::move(Arg.PredicatedBackedgeTakenCounts)), 10388 ConstantEvolutionLoopExitValue( 10389 std::move(Arg.ConstantEvolutionLoopExitValue)), 10390 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 10391 LoopDispositions(std::move(Arg.LoopDispositions)), 10392 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 10393 BlockDispositions(std::move(Arg.BlockDispositions)), 10394 UnsignedRanges(std::move(Arg.UnsignedRanges)), 10395 SignedRanges(std::move(Arg.SignedRanges)), 10396 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 10397 UniquePreds(std::move(Arg.UniquePreds)), 10398 SCEVAllocator(std::move(Arg.SCEVAllocator)), 10399 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 10400 FirstUnknown(Arg.FirstUnknown) { 10401 Arg.FirstUnknown = nullptr; 10402 } 10403 10404 ScalarEvolution::~ScalarEvolution() { 10405 // Iterate through all the SCEVUnknown instances and call their 10406 // destructors, so that they release their references to their values. 10407 for (SCEVUnknown *U = FirstUnknown; U;) { 10408 SCEVUnknown *Tmp = U; 10409 U = U->Next; 10410 Tmp->~SCEVUnknown(); 10411 } 10412 FirstUnknown = nullptr; 10413 10414 ExprValueMap.clear(); 10415 ValueExprMap.clear(); 10416 HasRecMap.clear(); 10417 10418 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 10419 // that a loop had multiple computable exits. 
10420 for (auto &BTCI : BackedgeTakenCounts) 10421 BTCI.second.clear(); 10422 for (auto &BTCI : PredicatedBackedgeTakenCounts) 10423 BTCI.second.clear(); 10424 10425 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 10426 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 10427 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 10428 } 10429 10430 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 10431 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 10432 } 10433 10434 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 10435 const Loop *L) { 10436 // Print all inner loops first 10437 for (Loop *I : *L) 10438 PrintLoopInfo(OS, SE, I); 10439 10440 OS << "Loop "; 10441 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10442 OS << ": "; 10443 10444 SmallVector<BasicBlock *, 8> ExitBlocks; 10445 L->getExitBlocks(ExitBlocks); 10446 if (ExitBlocks.size() != 1) 10447 OS << "<multiple exits> "; 10448 10449 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10450 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 10451 } else { 10452 OS << "Unpredictable backedge-taken count. "; 10453 } 10454 10455 OS << "\n" 10456 "Loop "; 10457 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10458 OS << ": "; 10459 10460 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 10461 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 10462 if (SE->isBackedgeTakenCountMaxOrZero(L)) 10463 OS << ", actual taken count either this or zero."; 10464 } else { 10465 OS << "Unpredictable max backedge-taken count. "; 10466 } 10467 10468 OS << "\n" 10469 "Loop "; 10470 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10471 OS << ": "; 10472 10473 SCEVUnionPredicate Pred; 10474 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 10475 if (!isa<SCEVCouldNotCompute>(PBT)) { 10476 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 10477 OS << " Predicates:\n"; 10478 Pred.print(OS, 4); 10479 } else { 10480 OS << "Unpredictable predicated backedge-taken count. "; 10481 } 10482 OS << "\n"; 10483 10484 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10485 OS << "Loop "; 10486 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10487 OS << ": "; 10488 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 10489 } 10490 } 10491 10492 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 10493 switch (LD) { 10494 case ScalarEvolution::LoopVariant: 10495 return "Variant"; 10496 case ScalarEvolution::LoopInvariant: 10497 return "Invariant"; 10498 case ScalarEvolution::LoopComputable: 10499 return "Computable"; 10500 } 10501 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 10502 } 10503 10504 void ScalarEvolution::print(raw_ostream &OS) const { 10505 // ScalarEvolution's implementation of the print method is to print 10506 // out SCEV values of all instructions that are interesting. Doing 10507 // this potentially causes it to create new SCEV objects though, 10508 // which technically conflicts with the const qualifier. This isn't 10509 // observable from outside the class though, so casting away the 10510 // const isn't dangerous. 
10511 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 10512 10513 OS << "Classifying expressions for: "; 10514 F.printAsOperand(OS, /*PrintType=*/false); 10515 OS << "\n"; 10516 for (Instruction &I : instructions(F)) 10517 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 10518 OS << I << '\n'; 10519 OS << " --> "; 10520 const SCEV *SV = SE.getSCEV(&I); 10521 SV->print(OS); 10522 if (!isa<SCEVCouldNotCompute>(SV)) { 10523 OS << " U: "; 10524 SE.getUnsignedRange(SV).print(OS); 10525 OS << " S: "; 10526 SE.getSignedRange(SV).print(OS); 10527 } 10528 10529 const Loop *L = LI.getLoopFor(I.getParent()); 10530 10531 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 10532 if (AtUse != SV) { 10533 OS << " --> "; 10534 AtUse->print(OS); 10535 if (!isa<SCEVCouldNotCompute>(AtUse)) { 10536 OS << " U: "; 10537 SE.getUnsignedRange(AtUse).print(OS); 10538 OS << " S: "; 10539 SE.getSignedRange(AtUse).print(OS); 10540 } 10541 } 10542 10543 if (L) { 10544 OS << "\t\t" "Exits: "; 10545 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 10546 if (!SE.isLoopInvariant(ExitValue, L)) { 10547 OS << "<<Unknown>>"; 10548 } else { 10549 OS << *ExitValue; 10550 } 10551 10552 bool First = true; 10553 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 10554 if (First) { 10555 OS << "\t\t" "LoopDispositions: { "; 10556 First = false; 10557 } else { 10558 OS << ", "; 10559 } 10560 10561 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10562 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 10563 } 10564 10565 for (auto *InnerL : depth_first(L)) { 10566 if (InnerL == L) 10567 continue; 10568 if (First) { 10569 OS << "\t\t" "LoopDispositions: { "; 10570 First = false; 10571 } else { 10572 OS << ", "; 10573 } 10574 10575 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10576 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 10577 } 10578 10579 OS << " }"; 10580 } 10581 10582 OS << "\n"; 10583 } 10584 10585 OS << "Determining loop execution counts for: "; 10586 F.printAsOperand(OS, /*PrintType=*/false); 10587 OS << "\n"; 10588 for (Loop *I : LI) 10589 PrintLoopInfo(OS, &SE, I); 10590 } 10591 10592 ScalarEvolution::LoopDisposition 10593 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 10594 auto &Values = LoopDispositions[S]; 10595 for (auto &V : Values) { 10596 if (V.getPointer() == L) 10597 return V.getInt(); 10598 } 10599 Values.emplace_back(L, LoopVariant); 10600 LoopDisposition D = computeLoopDisposition(S, L); 10601 auto &Values2 = LoopDispositions[S]; 10602 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 10603 if (V.getPointer() == L) { 10604 V.setInt(D); 10605 break; 10606 } 10607 } 10608 return D; 10609 } 10610 10611 ScalarEvolution::LoopDisposition 10612 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 10613 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 10614 case scConstant: 10615 return LoopInvariant; 10616 case scTruncate: 10617 case scZeroExtend: 10618 case scSignExtend: 10619 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 10620 case scAddRecExpr: { 10621 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 10622 10623 // If L is the addrec's loop, it's computable. 10624 if (AR->getLoop() == L) 10625 return LoopComputable; 10626 10627 // Add recurrences are never invariant in the function-body (null loop). 10628 if (!L) 10629 return LoopVariant; 10630 10631 // This recurrence is variant w.r.t. L if L contains AR's loop. 
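    // For example (illustrative): {0,+,1}<%inner> is variant with respect to
    // an outer loop containing %inner, since its value changes on every
    // iteration of %inner and hence within each iteration of the outer loop.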
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
      LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
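    // (Illustrative: the addrec's value is produced by a PHI in the loop
    // header, so it is usable anywhere in the header block, even before
    // the header's non-PHI instructions.)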
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
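
  // Verification strategy (as implemented below): recompute each loop's
  // backedge-taken count in the fresh instance SE2, map the cached count
  // into SE2's expression universe, and report any constant difference.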

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }
    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice versa *should have* invalidated SCEV.  However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively).  This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say).  The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
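  // Sketch of the logic below: keep the cached ScalarEvolution only if this
  // analysis (or the set of all function analyses) was preserved *and* none
  // of AssumptionAnalysis, DominatorTreeAnalysis, or LoopAnalysis was
  // invalidated.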
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)
char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
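  // As in getEqualPredicate above, the predicate kind plus the operand
  // pointers form the folding-set key, so structurally identical wrap
  // predicates are uniqued to a single node.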
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
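      // With no NewPreds set to extend, we may not create new assumptions;
      // we can only succeed if the existing union predicate already
      // implies P.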
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};
} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
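  // (A typical caller is PredicatedScalarEvolution::getAsAddRec below, which
  // feeds these predicates back into its union predicate.)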
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply
    // the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for it.
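/// The null FoldingSetNodeIDRef serves as that dummy ID; union predicates
/// are never inserted into the UniquePreds folding set, so the ID should
/// never be inspected.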
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
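  // Generation is an unsigned counter, so an increment can eventually wrap
  // back to zero; cached rewrites stamped with stale generation numbers
  // would then masquerade as current, hence the full refresh.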
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
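
// Example use of the PredicatedScalarEvolution interface (an illustrative
// sketch, not part of the analysis; a client such as a vectorizer might do
// something like this):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(PtrValue)) {
//     // AR is only valid under PSE.getUnionPredicate(); the client must
//     // emit runtime checks for those predicates before relying on AR.
//   }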