//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by brute force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
    VerifySCEV("verify-scev",
               cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps",
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetic"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
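  // For example, because scConstant precedes scUnknown in the SCEVTypes
  // ordering, constants sort ahead of opaque values, so a commuted add such
  // as (%x + 1) consistently canonicalizes to (1 + %x).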
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are used
    // by one SCEV, so we can safely sort recs by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;  // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // The simple case N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator, so the following visitors have empty implementations.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
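///
/// For example, BC(It, 2) = It * (It - 1) / 2 and BC(It, 3) =
/// It * (It - 1) * (It - 2) / 6, evaluated modulo 2^W as described in the
/// body below.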
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
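  // Worked example, K = 4: T starts at 1 (the factor of two contributed by
  // i = 2), i = 3 contributes no factors of two, and i = 4 contributes two
  // more, giving T = 3 and OddFactorial = 3. Indeed 4! = 24 = 3 * 2^3.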
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
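// For example, for an i8 recurrence whose Step is known to lie in [1, 2],
// the limit below is INT8_MIN - 2 == 126 with predicate SLT: any recurrence
// value slt 126 can be incremented by at most 2 without signed overflow.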
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once"
  // implies "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
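// For instance, when Start == PreStart + Step and that addition provably does
// not wrap, the extension distributes as ext(Start) == ext(Step) +
// ext(PreStart), which is what the helper below returns when
// getPreStartForExtend succeeds.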
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1585       // Note that this serves two purposes: It filters out loops that are
1586       // simply not analyzable, and it covers the case where this code is
1587       // being called from within backedge-taken count analysis, such that
1588       // attempting to ask for the backedge-taken count would likely result
1589       // in infinite recursion. In the latter case, the analysis code will
1590       // cope with a conservative value, and it will take care to purge
1591       // that value once it has finished.
1592       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1593       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1594         // Manually compute the final value for AR, checking for
1595         // overflow.
1596
1597         // Check whether the backedge-taken count can be losslessly cast to
1598         // the addrec's type. The count is always unsigned.
1599         const SCEV *CastedMaxBECount =
1600           getTruncateOrZeroExtend(MaxBECount, Start->getType());
1601         const SCEV *RecastedMaxBECount =
1602           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1603         if (MaxBECount == RecastedMaxBECount) {
1604           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1605           // Check whether Start+Step*MaxBECount has no unsigned overflow.
1606           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1607                                         SCEV::FlagAnyWrap, Depth + 1);
1608           const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1609                                                           SCEV::FlagAnyWrap,
1610                                                           Depth + 1),
1611                                                WideTy, Depth + 1);
1612           const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1613           const SCEV *WideMaxBECount =
1614             getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1615           const SCEV *OperandExtendedAdd =
1616             getAddExpr(WideStart,
1617                        getMulExpr(WideMaxBECount,
1618                                   getZeroExtendExpr(Step, WideTy, Depth + 1),
1619                                   SCEV::FlagAnyWrap, Depth + 1),
1620                        SCEV::FlagAnyWrap, Depth + 1);
1621           if (ZAdd == OperandExtendedAdd) {
1622             // Cache knowledge of AR NUW, which is propagated to this AddRec.
1623             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1624             // Return the expression with the addrec on the outside.
1625             return getAddRecExpr(
1626                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1627                                                          Depth + 1),
1628                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1629                 AR->getNoWrapFlags());
1630           }
1631           // Similar to above, only this time treat the step value as signed.
1632           // This covers loops that count down.
1633           OperandExtendedAdd =
1634             getAddExpr(WideStart,
1635                        getMulExpr(WideMaxBECount,
1636                                   getSignExtendExpr(Step, WideTy, Depth + 1),
1637                                   SCEV::FlagAnyWrap, Depth + 1),
1638                        SCEV::FlagAnyWrap, Depth + 1);
1639           if (ZAdd == OperandExtendedAdd) {
1640             // Cache knowledge of AR NW, which is propagated to this AddRec.
1641             // Negative step causes unsigned wrap, but it still can't self-wrap.
1642             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1643             // Return the expression with the addrec on the outside.
1644             return getAddRecExpr(
1645                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1646                                                          Depth + 1),
1647                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1648                 AR->getNoWrapFlags());
1649           }
1650         }
1651       }
1652
1653       // Normally, in the cases we can prove no-overflow via a
1654       // backedge guarding condition, we can also compute a backedge
1655       // taken count for the loop. The exceptions are assumptions and
1656       // guards present in the loop -- SCEV is not great at exploiting
1657       // these to compute max backedge taken counts, but can still use
1658       // these to prove lack of overflow. Use this fact to avoid
1659       // doing extra work that may not pay off.
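      // For example, a loop bounded only by an llvm.experimental.guard
      // (tracked by HasGuards) or an llvm.assume (tracked in AC) may have an
      // uncomputable max backedge-taken count even though the guarded
      // condition still rules out unsigned wrap.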
1660 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1661 !AC.assumptions().empty()) { 1662 // If the backedge is guarded by a comparison with the pre-inc 1663 // value the addrec is safe. Also, if the entry is guarded by 1664 // a comparison with the start value and the backedge is 1665 // guarded by a comparison with the post-inc value, the addrec 1666 // is safe. 1667 if (isKnownPositive(Step)) { 1668 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1669 getUnsignedRangeMax(Step)); 1670 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1671 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1672 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1673 AR->getPostIncExpr(*this), N))) { 1674 // Cache knowledge of AR NUW, which is propagated to this 1675 // AddRec. 1676 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1677 // Return the expression with the addrec on the outside. 1678 return getAddRecExpr( 1679 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1680 Depth + 1), 1681 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1682 AR->getNoWrapFlags()); 1683 } 1684 } else if (isKnownNegative(Step)) { 1685 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1686 getSignedRangeMin(Step)); 1687 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1688 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1689 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1690 AR->getPostIncExpr(*this), N))) { 1691 // Cache knowledge of AR NW, which is propagated to this 1692 // AddRec. Negative step causes unsigned wrap, but it 1693 // still can't self-wrap. 1694 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1695 // Return the expression with the addrec on the outside. 1696 return getAddRecExpr( 1697 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1698 Depth + 1), 1699 getSignExtendExpr(Step, Ty, Depth + 1), L, 1700 AR->getNoWrapFlags()); 1701 } 1702 } 1703 } 1704 1705 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1706 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1707 return getAddRecExpr( 1708 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1709 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1710 } 1711 } 1712 1713 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1714 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1715 if (SA->hasNoUnsignedWrap()) { 1716 // If the addition does not unsign overflow then we can, by definition, 1717 // commute the zero extension with the addition operation. 1718 SmallVector<const SCEV *, 4> Ops; 1719 for (const auto *Op : SA->operands()) 1720 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1721 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1722 } 1723 } 1724 1725 // The cast wasn't folded; create an explicit cast node. 1726 // Recompute the insert position, as it may have been invalidated. 
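  // (The recursive getZeroExtendExpr and getAddExpr calls above may have
  // inserted new nodes into UniqueSCEVs, so both the lookup result and the
  // insert position IP must be refreshed.)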
1727 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1728 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1729 Op, Ty); 1730 UniqueSCEVs.InsertNode(S, IP); 1731 return S; 1732 } 1733 1734 const SCEV * 1735 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1736 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1737 "This is not an extending conversion!"); 1738 assert(isSCEVable(Ty) && 1739 "This is not a conversion to a SCEVable type!"); 1740 Ty = getEffectiveSCEVType(Ty); 1741 1742 // Fold if the operand is constant. 1743 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1744 return getConstant( 1745 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1746 1747 // sext(sext(x)) --> sext(x) 1748 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1749 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1750 1751 // sext(zext(x)) --> zext(x) 1752 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1753 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1754 1755 // Before doing any expensive analysis, check to see if we've already 1756 // computed a SCEV for this Op and Ty. 1757 FoldingSetNodeID ID; 1758 ID.AddInteger(scSignExtend); 1759 ID.AddPointer(Op); 1760 ID.AddPointer(Ty); 1761 void *IP = nullptr; 1762 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1763 // Limit recursion depth. 1764 if (Depth > MaxExtDepth) { 1765 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1766 Op, Ty); 1767 UniqueSCEVs.InsertNode(S, IP); 1768 return S; 1769 } 1770 1771 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1772 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1773 // It's possible the bits taken off by the truncate were all sign bits. If 1774 // so, we should be able to simplify this further. 1775 const SCEV *X = ST->getOperand(); 1776 ConstantRange CR = getSignedRange(X); 1777 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1778 unsigned NewBits = getTypeSizeInBits(Ty); 1779 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1780 CR.sextOrTrunc(NewBits))) 1781 return getTruncateOrSignExtend(X, Ty); 1782 } 1783 1784 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1785 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1786 if (SA->getNumOperands() == 2) { 1787 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1788 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1789 if (SMul && SC1) { 1790 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1791 const APInt &C1 = SC1->getAPInt(); 1792 const APInt &C2 = SC2->getAPInt(); 1793 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1794 C2.ugt(C1) && C2.isPowerOf2()) 1795 return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1), 1796 getSignExtendExpr(SMul, Ty, Depth + 1), 1797 SCEV::FlagAnyWrap, Depth + 1); 1798 } 1799 } 1800 } 1801 1802 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1803 if (SA->hasNoSignedWrap()) { 1804 // If the addition does not sign overflow then we can, by definition, 1805 // commute the sign extension with the addition operation. 
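      // For example, over i8 operands widened to i32:
      //   sext((A + B)<nsw>) == (sext(A) + sext(B))<nsw>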
1806       SmallVector<const SCEV *, 4> Ops;
1807       for (const auto *Op : SA->operands())
1808         Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1809       return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1810     }
1811   }
1812   // If the input value is a chrec scev, and we can prove that the value
1813   // did not overflow the old, smaller, value, we can sign extend all of the
1814   // operands (often constants). This allows analysis of something like
1815   // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1816   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1817     if (AR->isAffine()) {
1818       const SCEV *Start = AR->getStart();
1819       const SCEV *Step = AR->getStepRecurrence(*this);
1820       unsigned BitWidth = getTypeSizeInBits(AR->getType());
1821       const Loop *L = AR->getLoop();
1822
1823       if (!AR->hasNoSignedWrap()) {
1824         auto NewFlags = proveNoWrapViaConstantRanges(AR);
1825         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1826       }
1827
1828       // If we have special knowledge that this addrec won't overflow,
1829       // we don't need to do any further analysis.
1830       if (AR->hasNoSignedWrap())
1831         return getAddRecExpr(
1832             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1833             getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1834
1835       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1836       // Note that this serves two purposes: It filters out loops that are
1837       // simply not analyzable, and it covers the case where this code is
1838       // being called from within backedge-taken count analysis, such that
1839       // attempting to ask for the backedge-taken count would likely result
1840       // in infinite recursion. In the latter case, the analysis code will
1841       // cope with a conservative value, and it will take care to purge
1842       // that value once it has finished.
1843       const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1844       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1845         // Manually compute the final value for AR, checking for
1846         // overflow.
1847
1848         // Check whether the backedge-taken count can be losslessly cast to
1849         // the addrec's type. The count is always unsigned.
1850         const SCEV *CastedMaxBECount =
1851           getTruncateOrZeroExtend(MaxBECount, Start->getType());
1852         const SCEV *RecastedMaxBECount =
1853           getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1854         if (MaxBECount == RecastedMaxBECount) {
1855           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1856           // Check whether Start+Step*MaxBECount has no signed overflow.
1857           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
1858                                         SCEV::FlagAnyWrap, Depth + 1);
1859           const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
1860                                                           SCEV::FlagAnyWrap,
1861                                                           Depth + 1),
1862                                                WideTy, Depth + 1);
1863           const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
1864           const SCEV *WideMaxBECount =
1865             getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1866           const SCEV *OperandExtendedAdd =
1867             getAddExpr(WideStart,
1868                        getMulExpr(WideMaxBECount,
1869                                   getSignExtendExpr(Step, WideTy, Depth + 1),
1870                                   SCEV::FlagAnyWrap, Depth + 1),
1871                        SCEV::FlagAnyWrap, Depth + 1);
1872           if (SAdd == OperandExtendedAdd) {
1873             // Cache knowledge of AR NSW, which is propagated to this AddRec.
1874             const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1875             // Return the expression with the addrec on the outside.
1876 return getAddRecExpr( 1877 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1878 Depth + 1), 1879 getSignExtendExpr(Step, Ty, Depth + 1), L, 1880 AR->getNoWrapFlags()); 1881 } 1882 // Similar to above, only this time treat the step value as unsigned. 1883 // This covers loops that count up with an unsigned step. 1884 OperandExtendedAdd = 1885 getAddExpr(WideStart, 1886 getMulExpr(WideMaxBECount, 1887 getZeroExtendExpr(Step, WideTy, Depth + 1), 1888 SCEV::FlagAnyWrap, Depth + 1), 1889 SCEV::FlagAnyWrap, Depth + 1); 1890 if (SAdd == OperandExtendedAdd) { 1891 // If AR wraps around then 1892 // 1893 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1894 // => SAdd != OperandExtendedAdd 1895 // 1896 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1897 // (SAdd == OperandExtendedAdd => AR is NW) 1898 1899 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1900 1901 // Return the expression with the addrec on the outside. 1902 return getAddRecExpr( 1903 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1904 Depth + 1), 1905 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1906 AR->getNoWrapFlags()); 1907 } 1908 } 1909 } 1910 1911 // Normally, in the cases we can prove no-overflow via a 1912 // backedge guarding condition, we can also compute a backedge 1913 // taken count for the loop. The exceptions are assumptions and 1914 // guards present in the loop -- SCEV is not great at exploiting 1915 // these to compute max backedge taken counts, but can still use 1916 // these to prove lack of overflow. Use this fact to avoid 1917 // doing extra work that may not pay off. 1918 1919 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1920 !AC.assumptions().empty()) { 1921 // If the backedge is guarded by a comparison with the pre-inc 1922 // value the addrec is safe. Also, if the entry is guarded by 1923 // a comparison with the start value and the backedge is 1924 // guarded by a comparison with the post-inc value, the addrec 1925 // is safe. 1926 ICmpInst::Predicate Pred; 1927 const SCEV *OverflowLimit = 1928 getSignedOverflowLimitForStep(Step, &Pred, this); 1929 if (OverflowLimit && 1930 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1931 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1932 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1933 OverflowLimit)))) { 1934 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
1935           const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1936           return getAddRecExpr(
1937               getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1938               getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1939         }
1940       }
1941
1942       // If Start and Step are constants, check if we can apply this
1943       // transformation:
1944       // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
1945       auto *SC1 = dyn_cast<SCEVConstant>(Start);
1946       auto *SC2 = dyn_cast<SCEVConstant>(Step);
1947       if (SC1 && SC2) {
1948         const APInt &C1 = SC1->getAPInt();
1949         const APInt &C2 = SC2->getAPInt();
1950         if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1951             C2.isPowerOf2()) {
1952           Start = getSignExtendExpr(Start, Ty, Depth + 1);
1953           const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
1954                                             AR->getNoWrapFlags());
1955           return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
1956                             SCEV::FlagAnyWrap, Depth + 1);
1957         }
1958       }
1959
1960       if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1961         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1962         return getAddRecExpr(
1963             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1964             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1965       }
1966     }
1967
1968   // If the input value is provably non-negative and we could not simplify
1969   // away the sext, build a zext instead.
1970   if (isKnownNonNegative(Op))
1971     return getZeroExtendExpr(Op, Ty, Depth + 1);
1972
1973   // The cast wasn't folded; create an explicit cast node.
1974   // Recompute the insert position, as it may have been invalidated.
1975   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1976   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1977                                                    Op, Ty);
1978   UniqueSCEVs.InsertNode(S, IP);
1979   return S;
1980 }
1981
1982 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1983 /// unspecified bits out to the given type.
1984 ///
1985 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1986                                               Type *Ty) {
1987   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1988          "This is not an extending conversion!");
1989   assert(isSCEVable(Ty) &&
1990          "This is not a conversion to a SCEVable type!");
1991   Ty = getEffectiveSCEVType(Ty);
1992
1993   // Sign-extend negative constants.
1994   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1995     if (SC->getAPInt().isNegative())
1996       return getSignExtendExpr(Op, Ty);
1997
1998   // Peel off a truncate cast.
1999   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2000     const SCEV *NewOp = T->getOperand();
2001     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2002       return getAnyExtendExpr(NewOp, Ty);
2003     return getTruncateOrNoop(NewOp, Ty);
2004   }
2005
2006   // Next try a zext cast. If the cast is folded, use it.
2007   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2008   if (!isa<SCEVZeroExtendExpr>(ZExt))
2009     return ZExt;
2010
2011   // Next try a sext cast. If the cast is folded, use it.
2012   const SCEV *SExt = getSignExtendExpr(Op, Ty);
2013   if (!isa<SCEVSignExtendExpr>(SExt))
2014     return SExt;
2015
2016   // Force the cast to be folded into the operands of an addrec.
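  // e.g. anyext({x,+,y}) becomes {anyext(x),+,anyext(y)}; only the
  // no-self-wrap flag (NW) survives this rewrite.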
2017 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2018 SmallVector<const SCEV *, 4> Ops; 2019 for (const SCEV *Op : AR->operands()) 2020 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2021 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2022 } 2023 2024 // If the expression is obviously signed, use the sext cast value. 2025 if (isa<SCEVSMaxExpr>(Op)) 2026 return SExt; 2027 2028 // Absent any other information, use the zext cast value. 2029 return ZExt; 2030 } 2031 2032 /// Process the given Ops list, which is a list of operands to be added under 2033 /// the given scale, update the given map. This is a helper function for 2034 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2035 /// that would form an add expression like this: 2036 /// 2037 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2038 /// 2039 /// where A and B are constants, update the map with these values: 2040 /// 2041 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2042 /// 2043 /// and add 13 + A*B*29 to AccumulatedConstant. 2044 /// This will allow getAddRecExpr to produce this: 2045 /// 2046 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2047 /// 2048 /// This form often exposes folding opportunities that are hidden in 2049 /// the original operand list. 2050 /// 2051 /// Return true iff it appears that any interesting folding opportunities 2052 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2053 /// the common case where no interesting opportunities are present, and 2054 /// is also used as a check to avoid infinite recursion. 2055 /// 2056 static bool 2057 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2058 SmallVectorImpl<const SCEV *> &NewOps, 2059 APInt &AccumulatedConstant, 2060 const SCEV *const *Ops, size_t NumOperands, 2061 const APInt &Scale, 2062 ScalarEvolution &SE) { 2063 bool Interesting = false; 2064 2065 // Iterate over the add operands. They are sorted, with constants first. 2066 unsigned i = 0; 2067 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2068 ++i; 2069 // Pull a buried constant out to the outside. 2070 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2071 Interesting = true; 2072 AccumulatedConstant += Scale * C->getAPInt(); 2073 } 2074 2075 // Next comes everything else. We're especially interested in multiplies 2076 // here, but they're in the middle, so just visit the rest with one loop. 2077 for (; i != NumOperands; ++i) { 2078 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2079 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2080 APInt NewScale = 2081 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2082 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2083 // A multiplication of a constant with another add; recurse. 2084 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2085 Interesting |= 2086 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2087 Add->op_begin(), Add->getNumOperands(), 2088 NewScale, SE); 2089 } else { 2090 // A multiplication of a constant with some other value. Update 2091 // the map. 
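        // e.g. with Scale == 1, an operand (5 * x * y) is recorded under the
        // key (x * y) with scale 5.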
2092 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2093 const SCEV *Key = SE.getMulExpr(MulOps); 2094 auto Pair = M.insert({Key, NewScale}); 2095 if (Pair.second) { 2096 NewOps.push_back(Pair.first->first); 2097 } else { 2098 Pair.first->second += NewScale; 2099 // The map already had an entry for this value, which may indicate 2100 // a folding opportunity. 2101 Interesting = true; 2102 } 2103 } 2104 } else { 2105 // An ordinary operand. Update the map. 2106 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2107 M.insert({Ops[i], Scale}); 2108 if (Pair.second) { 2109 NewOps.push_back(Pair.first->first); 2110 } else { 2111 Pair.first->second += Scale; 2112 // The map already had an entry for this value, which may indicate 2113 // a folding opportunity. 2114 Interesting = true; 2115 } 2116 } 2117 } 2118 2119 return Interesting; 2120 } 2121 2122 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2123 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2124 // can't-overflow flags for the operation if possible. 2125 static SCEV::NoWrapFlags 2126 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2127 const SmallVectorImpl<const SCEV *> &Ops, 2128 SCEV::NoWrapFlags Flags) { 2129 using namespace std::placeholders; 2130 typedef OverflowingBinaryOperator OBO; 2131 2132 bool CanAnalyze = 2133 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2134 (void)CanAnalyze; 2135 assert(CanAnalyze && "don't call from other places!"); 2136 2137 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2138 SCEV::NoWrapFlags SignOrUnsignWrap = 2139 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2140 2141 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2142 auto IsKnownNonNegative = [&](const SCEV *S) { 2143 return SE->isKnownNonNegative(S); 2144 }; 2145 2146 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2147 Flags = 2148 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2149 2150 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2151 2152 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2153 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2154 2155 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2156 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2157 2158 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2159 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2160 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2161 Instruction::Add, C, OBO::NoSignedWrap); 2162 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2163 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2164 } 2165 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2166 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2167 Instruction::Add, C, OBO::NoUnsignedWrap); 2168 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2169 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2170 } 2171 } 2172 2173 return Flags; 2174 } 2175 2176 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2177 if (!isLoopInvariant(S, L)) 2178 return false; 2179 // If a value depends on a SCEVUnknown which is defined after the loop, we 2180 // conservatively assume that we cannot calculate it at the loop's entry. 
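  // e.g. a SCEVUnknown for an instruction in a block dominated by the loop's
  // header is such a value: it is formally loop-invariant, but it has no
  // well-defined value until the loop has been entered.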
2181   struct FindDominatedSCEVUnknown {
2182     bool Found = false;
2183     const Loop *L;
2184     DominatorTree &DT;
2185     LoopInfo &LI;
2186
2187     FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
2188         : L(L), DT(DT), LI(LI) {}
2189
2190     bool checkSCEVUnknown(const SCEVUnknown *SU) {
2191       if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
2192         if (DT.dominates(L->getHeader(), I->getParent()))
2193           Found = true;
2194         else
2195           assert(DT.dominates(I->getParent(), L->getHeader()) &&
2196                  "No dominance relationship between SCEV and loop?");
2197       }
2198       return false;
2199     }
2200
2201     bool follow(const SCEV *S) {
2202       switch (static_cast<SCEVTypes>(S->getSCEVType())) {
2203       case scConstant:
2204         return false;
2205       case scAddRecExpr:
2206       case scTruncate:
2207       case scZeroExtend:
2208       case scSignExtend:
2209       case scAddExpr:
2210       case scMulExpr:
2211       case scUMaxExpr:
2212       case scSMaxExpr:
2213       case scUDivExpr:
2214         return true;
2215       case scUnknown:
2216         return checkSCEVUnknown(cast<SCEVUnknown>(S));
2217       case scCouldNotCompute:
2218         llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2219       }
2220       return false;
2221     }
2222
2223     bool isDone() { return Found; }
2224   };
2225
2226   FindDominatedSCEVUnknown FSU(L, DT, LI);
2227   SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
2228   ST.visitAll(S);
2229   return !FSU.Found;
2230 }
2231
2232 /// Get a canonical add expression, or something simpler if possible.
2233 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2234                                         SCEV::NoWrapFlags Flags,
2235                                         unsigned Depth) {
2236   assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2237          "only nuw or nsw allowed");
2238   assert(!Ops.empty() && "Cannot get empty add!");
2239   if (Ops.size() == 1) return Ops[0];
2240 #ifndef NDEBUG
2241   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2242   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2243     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2244            "SCEVAddExpr operand types don't match!");
2245 #endif
2246
2247   // Sort by complexity, this groups all similar expression types together.
2248   GroupByComplexity(Ops, &LI, DT);
2249
2250   Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2251
2252   // If there are any constants, fold them together.
2253   unsigned Idx = 0;
2254   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2255     ++Idx;
2256     assert(Idx < Ops.size());
2257     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2258       // We found two constants, fold them together!
2259       Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2260       if (Ops.size() == 2) return Ops[0];
2261       Ops.erase(Ops.begin()+1); // Erase the folded element
2262       LHSC = cast<SCEVConstant>(Ops[0]);
2263     }
2264
2265     // If we are left with a constant zero being added, strip it off.
2266     if (LHSC->getValue()->isZero()) {
2267       Ops.erase(Ops.begin());
2268       --Idx;
2269     }
2270
2271     if (Ops.size() == 1) return Ops[0];
2272   }
2273
2274   // Limit recursion calls depth.
2275   if (Depth > MaxArithDepth)
2276     return getOrCreateAddExpr(Ops, Flags);
2277
2278   // Okay, check to see if the same value occurs in the operand list more than
2279   // once. If so, merge them together into a multiply expression. Since we
2280   // sorted the list, these values are required to be adjacent.
2281   Type *Ty = Ops[0]->getType();
2282   bool FoundMatch = false;
2283   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2284     if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2285       // Scan ahead to count how many equal operands there are.
2286 unsigned Count = 2; 2287 while (i+Count != e && Ops[i+Count] == Ops[i]) 2288 ++Count; 2289 // Merge the values into a multiply. 2290 const SCEV *Scale = getConstant(Ty, Count); 2291 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2292 if (Ops.size() == Count) 2293 return Mul; 2294 Ops[i] = Mul; 2295 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2296 --i; e -= Count - 1; 2297 FoundMatch = true; 2298 } 2299 if (FoundMatch) 2300 return getAddExpr(Ops, Flags); 2301 2302 // Check for truncates. If all the operands are truncated from the same 2303 // type, see if factoring out the truncate would permit the result to be 2304 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 2305 // if the contents of the resulting outer trunc fold to something simple. 2306 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 2307 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 2308 Type *DstType = Trunc->getType(); 2309 Type *SrcType = Trunc->getOperand()->getType(); 2310 SmallVector<const SCEV *, 8> LargeOps; 2311 bool Ok = true; 2312 // Check all the operands to see if they can be represented in the 2313 // source type of the truncate. 2314 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2315 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2316 if (T->getOperand()->getType() != SrcType) { 2317 Ok = false; 2318 break; 2319 } 2320 LargeOps.push_back(T->getOperand()); 2321 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2322 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2323 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2324 SmallVector<const SCEV *, 8> LargeMulOps; 2325 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2326 if (const SCEVTruncateExpr *T = 2327 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2328 if (T->getOperand()->getType() != SrcType) { 2329 Ok = false; 2330 break; 2331 } 2332 LargeMulOps.push_back(T->getOperand()); 2333 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2334 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2335 } else { 2336 Ok = false; 2337 break; 2338 } 2339 } 2340 if (Ok) 2341 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2342 } else { 2343 Ok = false; 2344 break; 2345 } 2346 } 2347 if (Ok) { 2348 // Evaluate the expression in the larger type. 2349 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); 2350 // If it folds to something simple, use it. Otherwise, don't. 2351 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2352 return getTruncateExpr(Fold, DstType); 2353 } 2354 } 2355 2356 // Skip past any other cast SCEVs. 2357 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2358 ++Idx; 2359 2360 // If there are add operands they would be next. 2361 if (Idx < Ops.size()) { 2362 bool DeletedAdd = false; 2363 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2364 if (Ops.size() > AddOpsInlineThreshold || 2365 Add->getNumOperands() > AddOpsInlineThreshold) 2366 break; 2367 // If we have an add, expand the add operands onto the end of the operands 2368 // list. 2369 Ops.erase(Ops.begin()+Idx); 2370 Ops.append(Add->op_begin(), Add->op_end()); 2371 DeletedAdd = true; 2372 } 2373 2374 // If we deleted at least one add, we added operands to the end of the list, 2375 // and they are not necessarily sorted. Recurse to resort and resimplify 2376 // any operands we just acquired. 
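    // e.g. (x + (y + z)) has been flattened to the unsorted operand list
    // {x, y, z} at this point.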
2377     if (DeletedAdd)
2378       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2379   }
2380
2381   // Skip over the add expression until we get to a multiply.
2382   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2383     ++Idx;
2384
2385   // Check to see if there are any folding opportunities present with
2386   // operands multiplied by constant values.
2387   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2388     uint64_t BitWidth = getTypeSizeInBits(Ty);
2389     DenseMap<const SCEV *, APInt> M;
2390     SmallVector<const SCEV *, 8> NewOps;
2391     APInt AccumulatedConstant(BitWidth, 0);
2392     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2393                                      Ops.data(), Ops.size(),
2394                                      APInt(BitWidth, 1), *this)) {
2395       struct APIntCompare {
2396         bool operator()(const APInt &LHS, const APInt &RHS) const {
2397           return LHS.ult(RHS);
2398         }
2399       };
2400
2401       // Some interesting folding opportunity is present, so it's worthwhile to
2402       // re-generate the operands list. Group the operands by constant scale,
2403       // to avoid multiplying by the same constant scale multiple times.
2404       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2405       for (const SCEV *NewOp : NewOps)
2406         MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2407       // Re-generate the operands list.
2408       Ops.clear();
2409       if (AccumulatedConstant != 0)
2410         Ops.push_back(getConstant(AccumulatedConstant));
2411       for (auto &MulOp : MulOpLists)
2412         if (MulOp.first != 0)
2413           Ops.push_back(getMulExpr(
2414               getConstant(MulOp.first),
2415               getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2416               SCEV::FlagAnyWrap, Depth + 1));
2417       if (Ops.empty())
2418         return getZero(Ty);
2419       if (Ops.size() == 1)
2420         return Ops[0];
2421       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2422     }
2423   }
2424
2425   // If we are adding something to a multiply expression, make sure the
2426   // something is not already an operand of the multiply. If so, merge it into
2427   // the multiply.
2428   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2429     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2430     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2431       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2432       if (isa<SCEVConstant>(MulOpSCEV))
2433         continue;
2434       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2435         if (MulOpSCEV == Ops[AddOp]) {
2436           // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2437           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2438           if (Mul->getNumOperands() != 2) {
2439             // If the multiply has more than two operands, we must get the
2440             // Y*Z term.
2441             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2442                                                 Mul->op_begin()+MulOp);
2443             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2444             InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2445           }
2446           SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2447           const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2448           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2449                                             SCEV::FlagAnyWrap, Depth + 1);
2450           if (Ops.size() == 2) return OuterMul;
2451           if (AddOp < Idx) {
2452             Ops.erase(Ops.begin()+AddOp);
2453             Ops.erase(Ops.begin()+Idx-1);
2454           } else {
2455             Ops.erase(Ops.begin()+Idx);
2456             Ops.erase(Ops.begin()+AddOp-1);
2457           }
2458           Ops.push_back(OuterMul);
2459           return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2460         }
2461
2462       // Check this multiply against other multiplies being added together.
2463 for (unsigned OtherMulIdx = Idx+1; 2464 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2465 ++OtherMulIdx) { 2466 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2467 // If MulOp occurs in OtherMul, we can fold the two multiplies 2468 // together. 2469 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2470 OMulOp != e; ++OMulOp) 2471 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2472 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2473 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2474 if (Mul->getNumOperands() != 2) { 2475 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2476 Mul->op_begin()+MulOp); 2477 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2478 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2479 } 2480 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2481 if (OtherMul->getNumOperands() != 2) { 2482 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2483 OtherMul->op_begin()+OMulOp); 2484 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2485 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2486 } 2487 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2488 const SCEV *InnerMulSum = 2489 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2490 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2491 SCEV::FlagAnyWrap, Depth + 1); 2492 if (Ops.size() == 2) return OuterMul; 2493 Ops.erase(Ops.begin()+Idx); 2494 Ops.erase(Ops.begin()+OtherMulIdx-1); 2495 Ops.push_back(OuterMul); 2496 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2497 } 2498 } 2499 } 2500 } 2501 2502 // If there are any add recurrences in the operands list, see if any other 2503 // added values are loop invariant. If so, we can fold them into the 2504 // recurrence. 2505 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2506 ++Idx; 2507 2508 // Scan over all recurrences, trying to fold loop invariants into them. 2509 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2510 // Scan all of the other operands to this add and add them to the vector if 2511 // they are loop invariant w.r.t. the recurrence. 2512 SmallVector<const SCEV *, 8> LIOps; 2513 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2514 const Loop *AddRecLoop = AddRec->getLoop(); 2515 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2516 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2517 LIOps.push_back(Ops[i]); 2518 Ops.erase(Ops.begin()+i); 2519 --i; --e; 2520 } 2521 2522 // If we found some loop invariants, fold them into the recurrence. 2523 if (!LIOps.empty()) { 2524 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2525 LIOps.push_back(AddRec->getStart()); 2526 2527 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2528 AddRec->op_end()); 2529 // This follows from the fact that the no-wrap flags on the outer add 2530 // expression are applicable on the 0th iteration, when the add recurrence 2531 // will be equal to its start value. 2532 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2533 2534 // Build the new addrec. Propagate the NUW and NSW flags if both the 2535 // outer add and the inner addrec are guaranteed to have no overflow. 2536 // Always propagate NW. 2537 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2538 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2539 2540 // If all of the other operands were loop invariant, we are done. 
2541 if (Ops.size() == 1) return NewRec; 2542 2543 // Otherwise, add the folded AddRec by the non-invariant parts. 2544 for (unsigned i = 0;; ++i) 2545 if (Ops[i] == AddRec) { 2546 Ops[i] = NewRec; 2547 break; 2548 } 2549 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2550 } 2551 2552 // Okay, if there weren't any loop invariants to be folded, check to see if 2553 // there are multiple AddRec's with the same loop induction variable being 2554 // added together. If so, we can fold them. 2555 for (unsigned OtherIdx = Idx+1; 2556 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2557 ++OtherIdx) { 2558 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2559 // so that the 1st found AddRecExpr is dominated by all others. 2560 assert(DT.dominates( 2561 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2562 AddRec->getLoop()->getHeader()) && 2563 "AddRecExprs are not sorted in reverse dominance order?"); 2564 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2565 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2566 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2567 AddRec->op_end()); 2568 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2569 ++OtherIdx) { 2570 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2571 if (OtherAddRec->getLoop() == AddRecLoop) { 2572 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2573 i != e; ++i) { 2574 if (i >= AddRecOps.size()) { 2575 AddRecOps.append(OtherAddRec->op_begin()+i, 2576 OtherAddRec->op_end()); 2577 break; 2578 } 2579 SmallVector<const SCEV *, 2> TwoOps = { 2580 AddRecOps[i], OtherAddRec->getOperand(i)}; 2581 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2582 } 2583 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2584 } 2585 } 2586 // Step size has changed, so we cannot guarantee no self-wraparound. 2587 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2588 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2589 } 2590 } 2591 2592 // Otherwise couldn't fold anything into this recurrence. Move onto the 2593 // next one. 2594 } 2595 2596 // Okay, it looks like we really DO need an add expr. Check to see if we 2597 // already have one, otherwise create a new one. 
2598   return getOrCreateAddExpr(Ops, Flags);
2599 }
2600
2601 const SCEV *
2602 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2603                                     SCEV::NoWrapFlags Flags) {
2604   FoldingSetNodeID ID;
2605   ID.AddInteger(scAddExpr);
2606   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2607     ID.AddPointer(Ops[i]);
2608   void *IP = nullptr;
2609   SCEVAddExpr *S =
2610       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2611   if (!S) {
2612     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2613     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2614     S = new (SCEVAllocator)
2615         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2616     UniqueSCEVs.InsertNode(S, IP);
2617   }
2618   S->setNoWrapFlags(Flags);
2619   return S;
2620 }
2621
2622 const SCEV *
2623 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2624                                     SCEV::NoWrapFlags Flags) {
2625   FoldingSetNodeID ID;
2626   ID.AddInteger(scMulExpr);
2627   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2628     ID.AddPointer(Ops[i]);
2629   void *IP = nullptr;
2630   SCEVMulExpr *S =
2631       static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2632   if (!S) {
2633     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2634     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2635     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2636                                         O, Ops.size());
2637     UniqueSCEVs.InsertNode(S, IP);
2638   }
2639   S->setNoWrapFlags(Flags);
2640   return S;
2641 }
2642
2643 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2644   uint64_t k = i*j;
2645   if (j > 1 && k / j != i) Overflow = true;
2646   return k;
2647 }
2648
2649 /// Compute the result of "n choose k", the binomial coefficient. If an
2650 /// intermediate computation overflows, Overflow will be set and the return will
2651 /// be garbage. Overflow is not cleared on absence of overflow.
2652 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2653   // We use the multiplicative formula:
2654   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2655   // At each iteration, we take the n-th term of the numerator and divide by the
2656   // (k-n)th term of the denominator. This division will always produce an
2657   // integral result, and helps reduce the chance of overflow in the
2658   // intermediate computations. However, we can still overflow even when the
2659   // final result would fit.
2660
2661   if (n == 0 || n == k) return 1;
2662   if (k > n) return 0;
2663
2664   if (k > n/2)
2665     k = n-k;
2666
2667   uint64_t r = 1;
2668   for (uint64_t i = 1; i <= k; ++i) {
2669     r = umul_ov(r, n-(i-1), Overflow);
2670     r /= i;
2671   }
2672   return r;
2673 }
2674
2675 /// Determine if any of the operands in this SCEV are a constant or if
2676 /// any of the add or multiply expressions in this SCEV contain a constant.
2677 static bool containsConstantSomewhere(const SCEV *StartExpr) {
2678   SmallVector<const SCEV *, 4> Ops;
2679   Ops.push_back(StartExpr);
2680   while (!Ops.empty()) {
2681     const SCEV *CurrentExpr = Ops.pop_back_val();
2682     if (isa<SCEVConstant>(*CurrentExpr))
2683       return true;
2684
2685     if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
2686       const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
2687       Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
2688     }
2689   }
2690   return false;
2691 }
2692
2693 /// Get a canonical multiply expression, or something simpler if possible.
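/// e.g. constant operands are folded together (a multiply of 2, x, and 3
/// becomes 6 * x) and a multiply containing a constant zero folds to zero.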
2694 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2695 SCEV::NoWrapFlags Flags, 2696 unsigned Depth) { 2697 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2698 "only nuw or nsw allowed"); 2699 assert(!Ops.empty() && "Cannot get empty mul!"); 2700 if (Ops.size() == 1) return Ops[0]; 2701 #ifndef NDEBUG 2702 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2703 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2704 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2705 "SCEVMulExpr operand types don't match!"); 2706 #endif 2707 2708 // Sort by complexity, this groups all similar expression types together. 2709 GroupByComplexity(Ops, &LI, DT); 2710 2711 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2712 2713 // Limit recursion calls depth. 2714 if (Depth > MaxArithDepth) 2715 return getOrCreateMulExpr(Ops, Flags); 2716 2717 // If there are any constants, fold them together. 2718 unsigned Idx = 0; 2719 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2720 2721 // C1*(C2+V) -> C1*C2 + C1*V 2722 if (Ops.size() == 2) 2723 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2724 // If any of Add's ops are Adds or Muls with a constant, 2725 // apply this transformation as well. 2726 if (Add->getNumOperands() == 2) 2727 if (containsConstantSomewhere(Add)) 2728 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2729 SCEV::FlagAnyWrap, Depth + 1), 2730 getMulExpr(LHSC, Add->getOperand(1), 2731 SCEV::FlagAnyWrap, Depth + 1), 2732 SCEV::FlagAnyWrap, Depth + 1); 2733 2734 ++Idx; 2735 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2736 // We found two constants, fold them together! 2737 ConstantInt *Fold = 2738 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2739 Ops[0] = getConstant(Fold); 2740 Ops.erase(Ops.begin()+1); // Erase the folded element 2741 if (Ops.size() == 1) return Ops[0]; 2742 LHSC = cast<SCEVConstant>(Ops[0]); 2743 } 2744 2745 // If we are left with a constant one being multiplied, strip it off. 2746 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2747 Ops.erase(Ops.begin()); 2748 --Idx; 2749 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2750 // If we have a multiply of zero, it will always be zero. 2751 return Ops[0]; 2752 } else if (Ops[0]->isAllOnesValue()) { 2753 // If we have a mul by -1 of an add, try distributing the -1 among the 2754 // add operands. 2755 if (Ops.size() == 2) { 2756 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2757 SmallVector<const SCEV *, 4> NewOps; 2758 bool AnyFolded = false; 2759 for (const SCEV *AddOp : Add->operands()) { 2760 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2761 Depth + 1); 2762 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2763 NewOps.push_back(Mul); 2764 } 2765 if (AnyFolded) 2766 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2767 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2768 // Negation preserves a recurrence's no self-wrap property. 2769 SmallVector<const SCEV *, 4> Operands; 2770 for (const SCEV *AddRecOp : AddRec->operands()) 2771 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2772 Depth + 1)); 2773 2774 return getAddRecExpr(Operands, AddRec->getLoop(), 2775 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2776 } 2777 } 2778 } 2779 2780 if (Ops.size() == 1) 2781 return Ops[0]; 2782 } 2783 2784 // Skip over the add expression until we get to a multiply. 
2785   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2786     ++Idx;
2787
2788   // If there are mul operands, inline them all into this expression.
2789   if (Idx < Ops.size()) {
2790     bool DeletedMul = false;
2791     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2792       if (Ops.size() > MulOpsInlineThreshold)
2793         break;
2794       // If we have a mul, expand the mul operands onto the end of the
2795       // operands list.
2796       Ops.erase(Ops.begin()+Idx);
2797       Ops.append(Mul->op_begin(), Mul->op_end());
2798       DeletedMul = true;
2799     }
2800
2801     // If we deleted at least one mul, we added operands to the end of the
2802     // list, and they are not necessarily sorted. Recurse to resort and
2803     // resimplify any operands we just acquired.
2804     if (DeletedMul)
2805       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2806   }
2807
2808   // If there are any add recurrences in the operands list, see if any other
2809   // added values are loop invariant. If so, we can fold them into the
2810   // recurrence.
2811   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2812     ++Idx;
2813
2814   // Scan over all recurrences, trying to fold loop invariants into them.
2815   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2816     // Scan all of the other operands to this mul and add them to the vector
2817     // if they are loop invariant w.r.t. the recurrence.
2818     SmallVector<const SCEV *, 8> LIOps;
2819     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2820     const Loop *AddRecLoop = AddRec->getLoop();
2821     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2822       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2823         LIOps.push_back(Ops[i]);
2824         Ops.erase(Ops.begin()+i);
2825         --i; --e;
2826       }
2827
2828     // If we found some loop invariants, fold them into the recurrence.
2829     if (!LIOps.empty()) {
2830       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
2831       SmallVector<const SCEV *, 4> NewOps;
2832       NewOps.reserve(AddRec->getNumOperands());
2833       const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
2834       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2835         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2836                                     SCEV::FlagAnyWrap, Depth + 1));
2837
2838       // Build the new addrec. Propagate the NUW and NSW flags if both the
2839       // outer mul and the inner addrec are guaranteed to have no overflow.
2840       //
2841       // No self-wrap cannot be guaranteed after changing the step size, but
2842       // will be inferred if either NUW or NSW is true.
2843       Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2844       const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2845
2846       // If all of the other operands were loop invariant, we are done.
2847       if (Ops.size() == 1) return NewRec;
2848
2849       // Otherwise, multiply the folded AddRec by the non-invariant parts.
2850       for (unsigned i = 0;; ++i)
2851         if (Ops[i] == AddRec) {
2852           Ops[i] = NewRec;
2853           break;
2854         }
2855       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2856     }
2857
2858     // Okay, if there weren't any loop invariants to be folded, check to see
2859     // if there are multiple AddRec's with the same loop induction variable
2860     // being multiplied together. If so, we can fold them.
2861
2862     // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2863     // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2864     //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2865     //  ]]],+,...up to x=2n}.
2866     // Note that the arguments to choose() are always integers with values
2867     // known at compile time, never SCEV objects.
2868     //
2869     // The implementation avoids pointless extra computations when the two
2870     // addrec's are of different length (mathematically, it's equivalent to
2871     // an infinite stream of zeros on the right).
2872     bool OpsModified = false;
2873     for (unsigned OtherIdx = Idx+1;
2874          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2875          ++OtherIdx) {
2876       const SCEVAddRecExpr *OtherAddRec =
2877           dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2878       if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2879         continue;
2880
2881       bool Overflow = false;
2882       Type *Ty = AddRec->getType();
2883       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2884       SmallVector<const SCEV*, 7> AddRecOps;
2885       for (int x = 0, xe = AddRec->getNumOperands() +
2886              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2887         const SCEV *Term = getZero(Ty);
2888         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2889           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2890           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2891                  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2892                z < ze && !Overflow; ++z) {
2893             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2894             uint64_t Coeff;
2895             if (LargerThan64Bits)
2896               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2897             else
2898               Coeff = Coeff1*Coeff2;
2899             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2900             const SCEV *Term1 = AddRec->getOperand(y-z);
2901             const SCEV *Term2 = OtherAddRec->getOperand(z);
2902             Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
2903                                                SCEV::FlagAnyWrap, Depth + 1),
2904                               SCEV::FlagAnyWrap, Depth + 1);
2905           }
2906         }
2907         AddRecOps.push_back(Term);
2908       }
2909       if (!Overflow) {
2910         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2911                                               SCEV::FlagAnyWrap);
2912         if (Ops.size() == 2) return NewAddRec;
2913         Ops[Idx] = NewAddRec;
2914         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2915         OpsModified = true;
2916         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2917         if (!AddRec)
2918           break;
2919       }
2920     }
2921     if (OpsModified)
2922       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2923
2924     // Otherwise couldn't fold anything into this recurrence. Move onto the
2925     // next one.
2926   }
2927
2928   // Okay, it looks like we really DO need a mul expr. Check to see if we
2929   // already have one, otherwise create a new one.
2930   return getOrCreateMulExpr(Ops, Flags);
2931 }
2932
2933 /// Get a canonical unsigned division expression, or something simpler if
2934 /// possible.
2935 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2936                                          const SCEV *RHS) {
2937   assert(getEffectiveSCEVType(LHS->getType()) ==
2938          getEffectiveSCEVType(RHS->getType()) &&
2939          "SCEVUDivExpr operand types don't match!");
2940
2941   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2942     if (RHSC->getValue()->isOne())
2943       return LHS; // X udiv 1 --> x
2944     // If the denominator is zero, the result of the udiv is undefined. Don't
2945     // try to analyze it, because the resolution chosen here may differ from
2946     // the resolution chosen in other parts of the compiler.
2947     if (!RHSC->getValue()->isZero()) {
2948       // Determine if the division can be folded into the operands of
2949       // its LHS.
2950       // TODO: Generalize this to non-constants by using known-bits information.
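      // The folds below are validated by re-evaluating both sides in a type
      // (ExtTy) wide enough that unsigned overflow is impossible; a fold is
      // taken only if the zero-extended forms agree.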
2951 Type *Ty = LHS->getType(); 2952 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 2953 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 2954 // For non-power-of-two values, effectively round the value up to the 2955 // nearest power of two. 2956 if (!RHSC->getAPInt().isPowerOf2()) 2957 ++MaxShiftAmt; 2958 IntegerType *ExtTy = 2959 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 2960 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 2961 if (const SCEVConstant *Step = 2962 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 2963 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 2964 const APInt &StepInt = Step->getAPInt(); 2965 const APInt &DivInt = RHSC->getAPInt(); 2966 if (!StepInt.urem(DivInt) && 2967 getZeroExtendExpr(AR, ExtTy) == 2968 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2969 getZeroExtendExpr(Step, ExtTy), 2970 AR->getLoop(), SCEV::FlagAnyWrap)) { 2971 SmallVector<const SCEV *, 4> Operands; 2972 for (const SCEV *Op : AR->operands()) 2973 Operands.push_back(getUDivExpr(Op, RHS)); 2974 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 2975 } 2976 /// Get a canonical UDivExpr for a recurrence. 2977 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 2978 // We can currently only fold X%N if X is constant. 2979 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 2980 if (StartC && !DivInt.urem(StepInt) && 2981 getZeroExtendExpr(AR, ExtTy) == 2982 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2983 getZeroExtendExpr(Step, ExtTy), 2984 AR->getLoop(), SCEV::FlagAnyWrap)) { 2985 const APInt &StartInt = StartC->getAPInt(); 2986 const APInt &StartRem = StartInt.urem(StepInt); 2987 if (StartRem != 0) 2988 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 2989 AR->getLoop(), SCEV::FlagNW); 2990 } 2991 } 2992 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 2993 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 2994 SmallVector<const SCEV *, 4> Operands; 2995 for (const SCEV *Op : M->operands()) 2996 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2997 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 2998 // Find an operand that's safely divisible. 2999 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3000 const SCEV *Op = M->getOperand(i); 3001 const SCEV *Div = getUDivExpr(Op, RHSC); 3002 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3003 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3004 M->op_end()); 3005 Operands[i] = Div; 3006 return getMulExpr(Operands); 3007 } 3008 } 3009 } 3010 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3011 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3012 SmallVector<const SCEV *, 4> Operands; 3013 for (const SCEV *Op : A->operands()) 3014 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3015 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3016 Operands.clear(); 3017 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3018 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3019 if (isa<SCEVUDivExpr>(Op) || 3020 getMulExpr(Op, RHS) != A->getOperand(i)) 3021 break; 3022 Operands.push_back(Op); 3023 } 3024 if (Operands.size() == A->getNumOperands()) 3025 return getAddExpr(Operands); 3026 } 3027 } 3028 3029 // Fold if both operands are constant. 
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; a missing
      // factor may be provided by one of the other terms. We need to check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
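/// For example, if the step is itself a recurrence on the same loop, as in
/// getAddRecExpr(X, {Y,+,Z}<L>, L), the operands are flattened into the
/// single recurrence {X,+,Y,+,Z}<L>.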
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
          maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
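          //
          // For instance, {{A,+,B}<inner>,+,C}<outer> becomes
          // {{A,+,C}<outer>,+,B}<inner>, so the recurrence on the deeper
          // loop ends up outermost in the expression tree.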
3202 SCEV::NoWrapFlags InnerFlags = 3203 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3204 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3205 } 3206 } 3207 // Reset Operands to its original state. 3208 Operands[0] = NestedAR; 3209 } 3210 } 3211 3212 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3213 // already have one, otherwise create a new one. 3214 FoldingSetNodeID ID; 3215 ID.AddInteger(scAddRecExpr); 3216 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3217 ID.AddPointer(Operands[i]); 3218 ID.AddPointer(L); 3219 void *IP = nullptr; 3220 SCEVAddRecExpr *S = 3221 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3222 if (!S) { 3223 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3224 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3225 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3226 O, Operands.size(), L); 3227 UniqueSCEVs.InsertNode(S, IP); 3228 } 3229 S->setNoWrapFlags(Flags); 3230 return S; 3231 } 3232 3233 const SCEV * 3234 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3235 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3236 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3237 // getSCEV(Base)->getType() has the same address space as Base->getType() 3238 // because SCEV::getType() preserves the address space. 3239 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3240 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3241 // instruction to its SCEV, because the Instruction may be guarded by control 3242 // flow and the no-overflow bits may not be valid for the expression in any 3243 // context. This can be fixed similarly to how these flags are handled for 3244 // adds. 3245 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3246 : SCEV::FlagAnyWrap; 3247 3248 const SCEV *TotalOffset = getZero(IntPtrTy); 3249 // The array size is unimportant. The first thing we do on CurTy is getting 3250 // its element type. 3251 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3252 for (const SCEV *IndexExpr : IndexExprs) { 3253 // Compute the (potentially symbolic) offset in bytes for this index. 3254 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3255 // For a struct, add the member offset. 3256 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3257 unsigned FieldNo = Index->getZExtValue(); 3258 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3259 3260 // Add the field offset to the running total offset. 3261 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3262 3263 // Update CurTy to the type of the field at Index. 3264 CurTy = STy->getTypeAtIndex(Index); 3265 } else { 3266 // Update CurTy to its element type. 3267 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3268 // For an array, add the element offset, explicitly scaled. 3269 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3270 // Getelementptr indices are signed. 3271 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3272 3273 // Multiply the index by the element size to compute the element offset. 3274 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3275 3276 // Add the element offset to the running total offset. 3277 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3278 } 3279 } 3280 3281 // Add the total offset from all the GEP indices to the base. 
3282 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3283 } 3284 3285 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3286 const SCEV *RHS) { 3287 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3288 return getSMaxExpr(Ops); 3289 } 3290 3291 const SCEV * 3292 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3293 assert(!Ops.empty() && "Cannot get empty smax!"); 3294 if (Ops.size() == 1) return Ops[0]; 3295 #ifndef NDEBUG 3296 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3297 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3298 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3299 "SCEVSMaxExpr operand types don't match!"); 3300 #endif 3301 3302 // Sort by complexity, this groups all similar expression types together. 3303 GroupByComplexity(Ops, &LI, DT); 3304 3305 // If there are any constants, fold them together. 3306 unsigned Idx = 0; 3307 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3308 ++Idx; 3309 assert(Idx < Ops.size()); 3310 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3311 // We found two constants, fold them together! 3312 ConstantInt *Fold = ConstantInt::get( 3313 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3314 Ops[0] = getConstant(Fold); 3315 Ops.erase(Ops.begin()+1); // Erase the folded element 3316 if (Ops.size() == 1) return Ops[0]; 3317 LHSC = cast<SCEVConstant>(Ops[0]); 3318 } 3319 3320 // If we are left with a constant minimum-int, strip it off. 3321 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3322 Ops.erase(Ops.begin()); 3323 --Idx; 3324 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3325 // If we have an smax with a constant maximum-int, it will always be 3326 // maximum-int. 3327 return Ops[0]; 3328 } 3329 3330 if (Ops.size() == 1) return Ops[0]; 3331 } 3332 3333 // Find the first SMax 3334 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3335 ++Idx; 3336 3337 // Check to see if one of the operands is an SMax. If so, expand its operands 3338 // onto our operand list, and recurse to simplify. 3339 if (Idx < Ops.size()) { 3340 bool DeletedSMax = false; 3341 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3342 Ops.erase(Ops.begin()+Idx); 3343 Ops.append(SMax->op_begin(), SMax->op_end()); 3344 DeletedSMax = true; 3345 } 3346 3347 if (DeletedSMax) 3348 return getSMaxExpr(Ops); 3349 } 3350 3351 // Okay, check to see if the same value occurs in the operand list twice. If 3352 // so, delete one. Since we sorted the list, these values are required to 3353 // be adjacent. 3354 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3355 // X smax Y smax Y --> X smax Y 3356 // X smax Y --> X, if X is always greater than Y 3357 if (Ops[i] == Ops[i+1] || 3358 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3359 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3360 --i; --e; 3361 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3362 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3363 --i; --e; 3364 } 3365 3366 if (Ops.size() == 1) return Ops[0]; 3367 3368 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3369 3370 // Okay, it looks like we really DO need an smax expr. Check to see if we 3371 // already have one, otherwise create a new one. 
3372 FoldingSetNodeID ID; 3373 ID.AddInteger(scSMaxExpr); 3374 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3375 ID.AddPointer(Ops[i]); 3376 void *IP = nullptr; 3377 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3378 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3379 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3380 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3381 O, Ops.size()); 3382 UniqueSCEVs.InsertNode(S, IP); 3383 return S; 3384 } 3385 3386 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3387 const SCEV *RHS) { 3388 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3389 return getUMaxExpr(Ops); 3390 } 3391 3392 const SCEV * 3393 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3394 assert(!Ops.empty() && "Cannot get empty umax!"); 3395 if (Ops.size() == 1) return Ops[0]; 3396 #ifndef NDEBUG 3397 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3398 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3399 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3400 "SCEVUMaxExpr operand types don't match!"); 3401 #endif 3402 3403 // Sort by complexity, this groups all similar expression types together. 3404 GroupByComplexity(Ops, &LI, DT); 3405 3406 // If there are any constants, fold them together. 3407 unsigned Idx = 0; 3408 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3409 ++Idx; 3410 assert(Idx < Ops.size()); 3411 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3412 // We found two constants, fold them together! 3413 ConstantInt *Fold = ConstantInt::get( 3414 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3415 Ops[0] = getConstant(Fold); 3416 Ops.erase(Ops.begin()+1); // Erase the folded element 3417 if (Ops.size() == 1) return Ops[0]; 3418 LHSC = cast<SCEVConstant>(Ops[0]); 3419 } 3420 3421 // If we are left with a constant minimum-int, strip it off. 3422 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3423 Ops.erase(Ops.begin()); 3424 --Idx; 3425 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3426 // If we have an umax with a constant maximum-int, it will always be 3427 // maximum-int. 3428 return Ops[0]; 3429 } 3430 3431 if (Ops.size() == 1) return Ops[0]; 3432 } 3433 3434 // Find the first UMax 3435 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3436 ++Idx; 3437 3438 // Check to see if one of the operands is a UMax. If so, expand its operands 3439 // onto our operand list, and recurse to simplify. 3440 if (Idx < Ops.size()) { 3441 bool DeletedUMax = false; 3442 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3443 Ops.erase(Ops.begin()+Idx); 3444 Ops.append(UMax->op_begin(), UMax->op_end()); 3445 DeletedUMax = true; 3446 } 3447 3448 if (DeletedUMax) 3449 return getUMaxExpr(Ops); 3450 } 3451 3452 // Okay, check to see if the same value occurs in the operand list twice. If 3453 // so, delete one. Since we sorted the list, these values are required to 3454 // be adjacent. 
3455 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3456 // X umax Y umax Y --> X umax Y 3457 // X umax Y --> X, if X is always greater than Y 3458 if (Ops[i] == Ops[i+1] || 3459 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 3460 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3461 --i; --e; 3462 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 3463 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3464 --i; --e; 3465 } 3466 3467 if (Ops.size() == 1) return Ops[0]; 3468 3469 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3470 3471 // Okay, it looks like we really DO need a umax expr. Check to see if we 3472 // already have one, otherwise create a new one. 3473 FoldingSetNodeID ID; 3474 ID.AddInteger(scUMaxExpr); 3475 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3476 ID.AddPointer(Ops[i]); 3477 void *IP = nullptr; 3478 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3479 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3480 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3481 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3482 O, Ops.size()); 3483 UniqueSCEVs.InsertNode(S, IP); 3484 return S; 3485 } 3486 3487 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3488 const SCEV *RHS) { 3489 // ~smax(~x, ~y) == smin(x, y). 3490 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3491 } 3492 3493 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3494 const SCEV *RHS) { 3495 // ~umax(~x, ~y) == umin(x, y) 3496 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3497 } 3498 3499 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3500 // We can bypass creating a target-independent 3501 // constant expression and then folding it back into a ConstantInt. 3502 // This is just a compile-time optimization. 3503 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3504 } 3505 3506 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3507 StructType *STy, 3508 unsigned FieldNo) { 3509 // We can bypass creating a target-independent 3510 // constant expression and then folding it back into a ConstantInt. 3511 // This is just a compile-time optimization. 3512 return getConstant( 3513 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3514 } 3515 3516 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3517 // Don't attempt to do anything other than create a SCEVUnknown object 3518 // here. createSCEV only calls getUnknown after checking for all other 3519 // interesting possibilities, and any other code that calls getUnknown 3520 // is doing so in order to hide a value from SCEV canonicalization. 3521 3522 FoldingSetNodeID ID; 3523 ID.AddInteger(scUnknown); 3524 ID.AddPointer(V); 3525 void *IP = nullptr; 3526 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3527 assert(cast<SCEVUnknown>(S)->getValue() == V && 3528 "Stale SCEVUnknown in uniquing map!"); 3529 return S; 3530 } 3531 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3532 FirstUnknown); 3533 FirstUnknown = cast<SCEVUnknown>(S); 3534 UniqueSCEVs.InsertNode(S, IP); 3535 return S; 3536 } 3537 3538 //===----------------------------------------------------------------------===// 3539 // Basic SCEV Analysis and PHI Idiom Recognition Code 3540 // 3541 3542 /// Test if values of the given type are analyzable within the SCEV 3543 /// framework. 
/// This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately;
/// eraseValueFromMap should be used to remove V from ValueExprMap and
/// ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
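      // For instance, if S is (2 + (4 * %a)), Stripped is (4 * %a) with
      // Offset 2: SCEV expansion can then rematerialize (4 * %a) as V - 2
      // instead of emitting a fresh multiply.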
3683 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3684 !isa<GetElementPtrInst>(V)) 3685 ExprValueMap[Stripped].insert({V, Offset}); 3686 } 3687 } 3688 return S; 3689 } 3690 3691 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3692 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3693 3694 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3695 if (I != ValueExprMap.end()) { 3696 const SCEV *S = I->second; 3697 if (checkValidity(S)) 3698 return S; 3699 eraseValueFromMap(V); 3700 forgetMemoizedResults(S); 3701 } 3702 return nullptr; 3703 } 3704 3705 /// Return a SCEV corresponding to -V = -1*V 3706 /// 3707 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3708 SCEV::NoWrapFlags Flags) { 3709 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3710 return getConstant( 3711 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3712 3713 Type *Ty = V->getType(); 3714 Ty = getEffectiveSCEVType(Ty); 3715 return getMulExpr( 3716 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3717 } 3718 3719 /// Return a SCEV corresponding to ~V = -1-V 3720 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3721 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3722 return getConstant( 3723 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3724 3725 Type *Ty = V->getType(); 3726 Ty = getEffectiveSCEVType(Ty); 3727 const SCEV *AllOnes = 3728 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3729 return getMinusSCEV(AllOnes, V); 3730 } 3731 3732 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3733 SCEV::NoWrapFlags Flags, 3734 unsigned Depth) { 3735 // Fast path: X - X --> 0. 3736 if (LHS == RHS) 3737 return getZero(LHS->getType()); 3738 3739 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3740 // makes it so that we cannot make much use of NUW. 3741 auto AddFlags = SCEV::FlagAnyWrap; 3742 const bool RHSIsNotMinSigned = 3743 !getSignedRangeMin(RHS).isMinSignedValue(); 3744 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3745 // Let M be the minimum representable signed value. Then (-1)*RHS 3746 // signed-wraps if and only if RHS is M. That can happen even for 3747 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3748 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3749 // (-1)*RHS, we need to prove that RHS != M. 3750 // 3751 // If LHS is non-negative and we know that LHS - RHS does not 3752 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3753 // either by proving that RHS > M or that LHS >= 0. 3754 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3755 AddFlags = SCEV::FlagNSW; 3756 } 3757 } 3758 3759 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3760 // RHS is NSW and LHS >= 0. 3761 // 3762 // The difficulty here is that the NSW flag may have been proven 3763 // relative to a loop that is to be found in a recurrence in LHS and 3764 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3765 // larger scope than intended. 3766 auto NegFlags = RHSIsNotMinSigned ? 
                      SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;
  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // A SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
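      //
      // The condition below erases the entry in exactly those cases: any
      // non-PHI user, any value whose cached SCEV is not a SCEVUnknown, and
      // any PHI other than PN whose cached SCEV is the symbolic name itself.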
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only allow AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow expressions that are invariant in this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }
  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};
} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  typedef OverflowingBinaryOperator OBO;
  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {
/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
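/// For instance, MatchBinaryOp below maps `lshr %x, 3` to a BinaryOp
/// describing `udiv %x, 8`, even though no udiv instruction exists in
/// the IR.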
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW;
  bool IsNUW;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        IsNSW(false), IsNUW(false), Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
        Op(nullptr) {}
};
} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow: {
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
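        //
        // For example, an extractvalue of the sum from sadd.with.overflow
        // whose uses are all guarded by the overflow flag is modeled below
        // as `add nsw`, and the uadd variant as `add nuw`.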
4145 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 4146 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4147 CI->getArgOperand(1), /* IsNSW = */ true, 4148 /* IsNUW = */ false); 4149 else 4150 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4151 CI->getArgOperand(1), /* IsNSW = */ false, 4152 /* IsNUW*/ true); 4153 } 4154 4155 case Intrinsic::ssub_with_overflow: 4156 case Intrinsic::usub_with_overflow: 4157 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4158 CI->getArgOperand(1)); 4159 4160 case Intrinsic::smul_with_overflow: 4161 case Intrinsic::umul_with_overflow: 4162 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4163 CI->getArgOperand(1)); 4164 default: 4165 break; 4166 } 4167 } 4168 4169 default: 4170 break; 4171 } 4172 4173 return None; 4174 } 4175 4176 /// A helper function for createAddRecFromPHI to handle simple cases. 4177 /// 4178 /// This function tries to find an AddRec expression for the simplest (yet most 4179 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4180 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4181 /// technique for finding the AddRec expression. 4182 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4183 Value *BEValueV, 4184 Value *StartValueV) { 4185 const Loop *L = LI.getLoopFor(PN->getParent()); 4186 assert(L && L->getHeader() == PN->getParent()); 4187 assert(BEValueV && StartValueV); 4188 4189 auto BO = MatchBinaryOp(BEValueV, DT); 4190 if (!BO) 4191 return nullptr; 4192 4193 if (BO->Opcode != Instruction::Add) 4194 return nullptr; 4195 4196 const SCEV *Accum = nullptr; 4197 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4198 Accum = getSCEV(BO->RHS); 4199 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4200 Accum = getSCEV(BO->LHS); 4201 4202 if (!Accum) 4203 return nullptr; 4204 4205 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4206 if (BO->IsNUW) 4207 Flags = setFlags(Flags, SCEV::FlagNUW); 4208 if (BO->IsNSW) 4209 Flags = setFlags(Flags, SCEV::FlagNSW); 4210 4211 const SCEV *StartVal = getSCEV(StartValueV); 4212 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4213 4214 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4215 4216 // We can add Flags to the post-inc expression only if we 4217 // know that it is *undefined behavior* for BEValueV to 4218 // overflow. 4219 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4220 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4221 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4222 4223 return PHISCEV; 4224 } 4225 4226 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 4227 const Loop *L = LI.getLoopFor(PN->getParent()); 4228 if (!L || L->getHeader() != PN->getParent()) 4229 return nullptr; 4230 4231 // The loop may have multiple entrances or multiple exits; we can analyze 4232 // this phi as an addrec if it has a unique entry value and a unique 4233 // backedge value. 
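  // For example, for the canonical induction PHI
  //   loop:
  //     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  //     %iv.next = add i32 %iv, 1
  // the unique start value is 0 and the unique backedge value is %iv.next,
  // which yields the recurrence {0,+,1}<%loop>.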
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(Add->getOperand(i));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of
    // BEValue by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
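// For instance, a SCEVUDivExpr is never treated as available, since
// materializing it at BB could introduce a division by zero that the
// original program never executes.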
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are for the loop BB is in, or for
        // some outer loop. This guarantees availability because the value of
        // the add recurrence at BB is simply the "current" value of the
        // induction variable. We can relax this in the future; for instance
        // an add recurrence on a sibling dominating loop is also available
        // at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
4458 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4459 Value *&C, Value *&LHS, Value *&RHS) { 4460 C = BI->getCondition(); 4461 4462 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4463 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4464 4465 if (!LeftEdge.isSingleEdge()) 4466 return false; 4467 4468 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4469 4470 Use &LeftUse = Merge->getOperandUse(0); 4471 Use &RightUse = Merge->getOperandUse(1); 4472 4473 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4474 LHS = LeftUse; 4475 RHS = RightUse; 4476 return true; 4477 } 4478 4479 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4480 LHS = RightUse; 4481 RHS = LeftUse; 4482 return true; 4483 } 4484 4485 return false; 4486 } 4487 4488 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4489 auto IsReachable = 4490 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 4491 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 4492 const Loop *L = LI.getLoopFor(PN->getParent()); 4493 4494 // We don't want to break LCSSA, even in a SCEV expression tree. 4495 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4496 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4497 return nullptr; 4498 4499 // Try to match 4500 // 4501 // br %cond, label %left, label %right 4502 // left: 4503 // br label %merge 4504 // right: 4505 // br label %merge 4506 // merge: 4507 // V = phi [ %x, %left ], [ %y, %right ] 4508 // 4509 // as "select %cond, %x, %y" 4510 4511 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4512 assert(IDom && "At least the entry block should dominate PN"); 4513 4514 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4515 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4516 4517 if (BI && BI->isConditional() && 4518 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4519 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4520 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4521 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4522 } 4523 4524 return nullptr; 4525 } 4526 4527 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4528 if (const SCEV *S = createAddRecFromPHI(PN)) 4529 return S; 4530 4531 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4532 return S; 4533 4534 // If the PHI has a single incoming value, follow that value, unless the 4535 // PHI's incoming blocks are in a different loop, in which case doing so 4536 // risks breaking LCSSA form. Instcombine would normally zap these, but 4537 // it doesn't have DominatorTree information, so it may miss cases. 4538 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 4539 if (LI.replacementPreservesLCSSAForm(PN, V)) 4540 return getSCEV(V); 4541 4542 // If it's not a loop phi, we can't handle it yet. 4543 return getUnknown(PN); 4544 } 4545 4546 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 4547 Value *Cond, 4548 Value *TrueVal, 4549 Value *FalseVal) { 4550 // Handle "constant" branch or select. This can occur for instance when a 4551 // loop pass transforms an inner loop and moves on to process the outer loop. 4552 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 4553 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 4554 4555 // Try to match some simple smax or umax patterns. 
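  // The "+x" in the patterns matched below is handled by requiring both
  // select arms to differ from the (possibly extended) compared values by
  // the same offset: when TrueVal - LHS == FalseVal - RHS, the select folds
  // to smax(LHS, RHS) (or the umax/min analogue) plus that common offset.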
4556 auto *ICI = dyn_cast<ICmpInst>(Cond); 4557 if (!ICI) 4558 return getUnknown(I); 4559 4560 Value *LHS = ICI->getOperand(0); 4561 Value *RHS = ICI->getOperand(1); 4562 4563 switch (ICI->getPredicate()) { 4564 case ICmpInst::ICMP_SLT: 4565 case ICmpInst::ICMP_SLE: 4566 std::swap(LHS, RHS); 4567 LLVM_FALLTHROUGH; 4568 case ICmpInst::ICMP_SGT: 4569 case ICmpInst::ICMP_SGE: 4570 // a >s b ? a+x : b+x -> smax(a, b)+x 4571 // a >s b ? b+x : a+x -> smin(a, b)+x 4572 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4573 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 4574 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 4575 const SCEV *LA = getSCEV(TrueVal); 4576 const SCEV *RA = getSCEV(FalseVal); 4577 const SCEV *LDiff = getMinusSCEV(LA, LS); 4578 const SCEV *RDiff = getMinusSCEV(RA, RS); 4579 if (LDiff == RDiff) 4580 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 4581 LDiff = getMinusSCEV(LA, RS); 4582 RDiff = getMinusSCEV(RA, LS); 4583 if (LDiff == RDiff) 4584 return getAddExpr(getSMinExpr(LS, RS), LDiff); 4585 } 4586 break; 4587 case ICmpInst::ICMP_ULT: 4588 case ICmpInst::ICMP_ULE: 4589 std::swap(LHS, RHS); 4590 LLVM_FALLTHROUGH; 4591 case ICmpInst::ICMP_UGT: 4592 case ICmpInst::ICMP_UGE: 4593 // a >u b ? a+x : b+x -> umax(a, b)+x 4594 // a >u b ? b+x : a+x -> umin(a, b)+x 4595 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4596 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4597 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 4598 const SCEV *LA = getSCEV(TrueVal); 4599 const SCEV *RA = getSCEV(FalseVal); 4600 const SCEV *LDiff = getMinusSCEV(LA, LS); 4601 const SCEV *RDiff = getMinusSCEV(RA, RS); 4602 if (LDiff == RDiff) 4603 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 4604 LDiff = getMinusSCEV(LA, RS); 4605 RDiff = getMinusSCEV(RA, LS); 4606 if (LDiff == RDiff) 4607 return getAddExpr(getUMinExpr(LS, RS), LDiff); 4608 } 4609 break; 4610 case ICmpInst::ICMP_NE: 4611 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 4612 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4613 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4614 const SCEV *One = getOne(I->getType()); 4615 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4616 const SCEV *LA = getSCEV(TrueVal); 4617 const SCEV *RA = getSCEV(FalseVal); 4618 const SCEV *LDiff = getMinusSCEV(LA, LS); 4619 const SCEV *RDiff = getMinusSCEV(RA, One); 4620 if (LDiff == RDiff) 4621 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4622 } 4623 break; 4624 case ICmpInst::ICMP_EQ: 4625 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 4626 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4627 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4628 const SCEV *One = getOne(I->getType()); 4629 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4630 const SCEV *LA = getSCEV(TrueVal); 4631 const SCEV *RA = getSCEV(FalseVal); 4632 const SCEV *LDiff = getMinusSCEV(LA, One); 4633 const SCEV *RDiff = getMinusSCEV(RA, LS); 4634 if (LDiff == RDiff) 4635 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4636 } 4637 break; 4638 default: 4639 break; 4640 } 4641 4642 return getUnknown(I); 4643 } 4644 4645 /// Expand GEP instructions into add and multiply operations. This allows them 4646 /// to be analyzed by regular SCEV code. 4647 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 4648 // Don't attempt to analyze GEPs over unsized objects. 
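  // (For sized element types, the expansion works as in this hypothetical
  // example: "%gep = getelementptr i32, i32* %p, i64 %i" becomes the SCEV
  // (%p + 4 * %i), an add of the base pointer and the scaled index.)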
4649 if (!GEP->getSourceElementType()->isSized()) 4650 return getUnknown(GEP); 4651 4652 SmallVector<const SCEV *, 4> IndexExprs; 4653 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 4654 IndexExprs.push_back(getSCEV(*Index)); 4655 return getGEPExpr(GEP, IndexExprs); 4656 } 4657 4658 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 4659 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 4660 return C->getAPInt().countTrailingZeros(); 4661 4662 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 4663 return std::min(GetMinTrailingZeros(T->getOperand()), 4664 (uint32_t)getTypeSizeInBits(T->getType())); 4665 4666 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 4667 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 4668 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 4669 ? getTypeSizeInBits(E->getType()) 4670 : OpRes; 4671 } 4672 4673 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 4674 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 4675 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 4676 ? getTypeSizeInBits(E->getType()) 4677 : OpRes; 4678 } 4679 4680 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 4681 // The result is the min of all operands results. 4682 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 4683 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 4684 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 4685 return MinOpRes; 4686 } 4687 4688 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 4689 // The result is the sum of all operands results. 4690 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 4691 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 4692 for (unsigned i = 1, e = M->getNumOperands(); 4693 SumOpRes != BitWidth && i != e; ++i) 4694 SumOpRes = 4695 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 4696 return SumOpRes; 4697 } 4698 4699 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 4700 // The result is the min of all operands results. 4701 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 4702 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 4703 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 4704 return MinOpRes; 4705 } 4706 4707 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 4708 // The result is the min of all operands results. 4709 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 4710 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 4711 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 4712 return MinOpRes; 4713 } 4714 4715 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 4716 // The result is the min of all operands results. 4717 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 4718 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 4719 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 4720 return MinOpRes; 4721 } 4722 4723 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4724 // For a SCEVUnknown, ask ValueTracking. 
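    // For example (hypothetical): if computeKnownBits proves that the low
    // three bits of U's value are zero (say it is a left shift by 3), then
    // countMinTrailingZeros() below returns 3.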
4725 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 4726 return Known.countMinTrailingZeros(); 4727 } 4728 4729 // SCEVUDivExpr 4730 return 0; 4731 } 4732 4733 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 4734 auto I = MinTrailingZerosCache.find(S); 4735 if (I != MinTrailingZerosCache.end()) 4736 return I->second; 4737 4738 uint32_t Result = GetMinTrailingZerosImpl(S); 4739 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 4740 assert(InsertPair.second && "Should insert a new key"); 4741 return InsertPair.first->second; 4742 } 4743 4744 /// Helper method to assign a range to V from metadata present in the IR. 4745 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 4746 if (Instruction *I = dyn_cast<Instruction>(V)) 4747 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 4748 return getConstantRangeFromMetadata(*MD); 4749 4750 return None; 4751 } 4752 4753 /// Determine the range for a particular SCEV. If SignHint is 4754 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 4755 /// with a "cleaner" unsigned (resp. signed) representation. 4756 const ConstantRange & 4757 ScalarEvolution::getRangeRef(const SCEV *S, 4758 ScalarEvolution::RangeSignHint SignHint) { 4759 DenseMap<const SCEV *, ConstantRange> &Cache = 4760 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 4761 : SignedRanges; 4762 4763 // See if we've computed this range already. 4764 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 4765 if (I != Cache.end()) 4766 return I->second; 4767 4768 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 4769 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 4770 4771 unsigned BitWidth = getTypeSizeInBits(S->getType()); 4772 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 4773 4774 // If the value has known zeros, the maximum value will have those known zeros 4775 // as well. 
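  // For instance (illustrative): with BitWidth == 8 and TZ == 2, the unsigned
  // case below yields [0, 0xFC + 1), since 0xFC is the largest 8-bit value
  // with two trailing zero bits.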
4776 uint32_t TZ = GetMinTrailingZeros(S); 4777 if (TZ != 0) { 4778 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 4779 ConservativeResult = 4780 ConstantRange(APInt::getMinValue(BitWidth), 4781 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 4782 else 4783 ConservativeResult = ConstantRange( 4784 APInt::getSignedMinValue(BitWidth), 4785 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 4786 } 4787 4788 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 4789 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 4790 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 4791 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 4792 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 4793 } 4794 4795 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 4796 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 4797 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 4798 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 4799 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 4800 } 4801 4802 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 4803 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 4804 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 4805 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 4806 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 4807 } 4808 4809 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 4810 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 4811 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 4812 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 4813 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 4814 } 4815 4816 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 4817 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 4818 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 4819 return setRange(UDiv, SignHint, 4820 ConservativeResult.intersectWith(X.udiv(Y))); 4821 } 4822 4823 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 4824 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 4825 return setRange(ZExt, SignHint, 4826 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 4827 } 4828 4829 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 4830 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 4831 return setRange(SExt, SignHint, 4832 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 4833 } 4834 4835 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 4836 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 4837 return setRange(Trunc, SignHint, 4838 ConservativeResult.intersectWith(X.truncate(BitWidth))); 4839 } 4840 4841 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 4842 // If there's no unsigned wrap, the value will never be less than its 4843 // initial value. 4844 if (AddRec->hasNoUnsignedWrap()) 4845 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 4846 if (!C->getValue()->isZero()) 4847 ConservativeResult = ConservativeResult.intersectWith( 4848 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 4849 4850 // If there's no signed wrap, and all the operands have the same sign or 4851 // zero, the value won't ever change sign. 
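    // E.g. (illustrative): {1,+,2}<nsw> has all operands non-negative, so its
    // range may be intersected with the non-negative half-range built below;
    // {-1,+,-2}<nsw> is the non-positive analogue.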
4852 if (AddRec->hasNoSignedWrap()) { 4853 bool AllNonNeg = true; 4854 bool AllNonPos = true; 4855 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4856 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 4857 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 4858 } 4859 if (AllNonNeg) 4860 ConservativeResult = ConservativeResult.intersectWith( 4861 ConstantRange(APInt(BitWidth, 0), 4862 APInt::getSignedMinValue(BitWidth))); 4863 else if (AllNonPos) 4864 ConservativeResult = ConservativeResult.intersectWith( 4865 ConstantRange(APInt::getSignedMinValue(BitWidth), 4866 APInt(BitWidth, 1))); 4867 } 4868 4869 // TODO: non-affine addrec 4870 if (AddRec->isAffine()) { 4871 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 4872 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 4873 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 4874 auto RangeFromAffine = getRangeForAffineAR( 4875 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4876 BitWidth); 4877 if (!RangeFromAffine.isFullSet()) 4878 ConservativeResult = 4879 ConservativeResult.intersectWith(RangeFromAffine); 4880 4881 auto RangeFromFactoring = getRangeViaFactoring( 4882 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4883 BitWidth); 4884 if (!RangeFromFactoring.isFullSet()) 4885 ConservativeResult = 4886 ConservativeResult.intersectWith(RangeFromFactoring); 4887 } 4888 } 4889 4890 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 4891 } 4892 4893 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4894 // Check if the IR explicitly contains !range metadata. 4895 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 4896 if (MDRange.hasValue()) 4897 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 4898 4899 // Split here to avoid paying the compile-time cost of calling both 4900 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 4901 // if needed. 4902 const DataLayout &DL = getDataLayout(); 4903 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 4904 // For a SCEVUnknown, ask ValueTracking. 4905 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 4906 if (Known.One != ~Known.Zero + 1) 4907 ConservativeResult = 4908 ConservativeResult.intersectWith(ConstantRange(Known.One, 4909 ~Known.Zero + 1)); 4910 } else { 4911 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 4912 "generalize as needed!"); 4913 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 4914 if (NS > 1) 4915 ConservativeResult = ConservativeResult.intersectWith( 4916 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 4917 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 4918 } 4919 4920 return setRange(U, SignHint, std::move(ConservativeResult)); 4921 } 4922 4923 return setRange(S, SignHint, std::move(ConservativeResult)); 4924 } 4925 4926 // Given a StartRange, Step and MaxBECount for an expression compute a range of 4927 // values that the expression can take. Initially, the expression has a value 4928 // from StartRange and then is changed by Step up to MaxBECount times. Signed 4929 // argument defines if we treat Step as signed or unsigned. 
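// A small worked example (hypothetical numbers): for StartRange = [0, 10),
// Step = 3, MaxBECount = 5 and Signed = false, the total change is
// Offset = 3 * 5 = 15, so the result below is [0, 9 + 15 + 1) = [0, 25),
// assuming no wrap occurs at the given bitwidth.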
4930 static ConstantRange getRangeForAffineARHelper(APInt Step,
4931                                                const ConstantRange &StartRange,
4932                                                const APInt &MaxBECount,
4933                                                unsigned BitWidth, bool Signed) {
4934   // If either Step or MaxBECount is 0, then the expression won't change, and we
4935   // just need to return the initial range.
4936   if (Step == 0 || MaxBECount == 0)
4937     return StartRange;
4938
4939   // If we don't know anything about the initial value (i.e. StartRange is
4940   // FullRange), then we don't know anything about the final range either.
4941   // Return FullRange.
4942   if (StartRange.isFullSet())
4943     return ConstantRange(BitWidth, /* isFullSet = */ true);
4944
4945   // If Step is signed and negative, then we use its absolute value, but we also
4946   // note that we're moving in the opposite direction.
4947   bool Descending = Signed && Step.isNegative();
4948
4949   if (Signed)
4950     // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
4951     // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
4952     // These equations hold true due to the well-defined wrap-around behavior of
4953     // APInt.
4954     Step = Step.abs();
4955
4956   // Check if Offset (Step * MaxBECount) would be more than the full span of
4957   // BitWidth. If it would, the expression is guaranteed to overflow.
4958   if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
4959     return ConstantRange(BitWidth, /* isFullSet = */ true);
4960
4961   // Offset is by how much the expression can change. Checks above guarantee no
4962   // overflow here.
4963   APInt Offset = Step * MaxBECount;
4964
4965   // Minimum value of the final range will match the minimal value of StartRange
4966   // if the expression is increasing and will be decreased by Offset otherwise.
4967   // Maximum value of the final range will match the maximal value of StartRange
4968   // if the expression is decreasing and will be increased by Offset otherwise.
4969   APInt StartLower = StartRange.getLower();
4970   APInt StartUpper = StartRange.getUpper() - 1;
4971   APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
4972                                    : (StartUpper + std::move(Offset));
4973
4974   // It's possible that the new minimum/maximum value will fall into the initial
4975   // range (due to wrap around). This means that the expression can take any
4976   // value in this bitwidth, and we have to return full range.
4977   if (StartRange.contains(MovedBoundary))
4978     return ConstantRange(BitWidth, /* isFullSet = */ true);
4979
4980   APInt NewLower =
4981       Descending ? std::move(MovedBoundary) : std::move(StartLower);
4982   APInt NewUpper =
4983       Descending ? std::move(StartUpper) : std::move(MovedBoundary);
4984   NewUpper += 1;
4985
4986   // If we end up with full range, return a proper full range.
4987   if (NewLower == NewUpper)
4988     return ConstantRange(BitWidth, /* isFullSet = */ true);
4989
4990   // No overflow detected; return the resulting [NewLower, NewUpper) range.
4991   return ConstantRange(std::move(NewLower), std::move(NewUpper));
4992 }
4993
4994 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
4995                                                    const SCEV *Step,
4996                                                    const SCEV *MaxBECount,
4997                                                    unsigned BitWidth) {
4998   assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
4999          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
5000          "Precondition!");
5001
5002   MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
5003   APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5004
5005   // First, consider step signed.
5006 ConstantRange StartSRange = getSignedRange(Start); 5007 ConstantRange StepSRange = getSignedRange(Step); 5008 5009 // If Step can be both positive and negative, we need to find ranges for the 5010 // maximum absolute step values in both directions and union them. 5011 ConstantRange SR = 5012 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5013 MaxBECountValue, BitWidth, /* Signed = */ true); 5014 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5015 StartSRange, MaxBECountValue, 5016 BitWidth, /* Signed = */ true)); 5017 5018 // Next, consider step unsigned. 5019 ConstantRange UR = getRangeForAffineARHelper( 5020 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5021 MaxBECountValue, BitWidth, /* Signed = */ false); 5022 5023 // Finally, intersect signed and unsigned ranges. 5024 return SR.intersectWith(UR); 5025 } 5026 5027 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5028 const SCEV *Step, 5029 const SCEV *MaxBECount, 5030 unsigned BitWidth) { 5031 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5032 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5033 5034 struct SelectPattern { 5035 Value *Condition = nullptr; 5036 APInt TrueValue; 5037 APInt FalseValue; 5038 5039 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5040 const SCEV *S) { 5041 Optional<unsigned> CastOp; 5042 APInt Offset(BitWidth, 0); 5043 5044 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5045 "Should be!"); 5046 5047 // Peel off a constant offset: 5048 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5049 // In the future we could consider being smarter here and handle 5050 // {Start+Step,+,Step} too. 5051 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5052 return; 5053 5054 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5055 S = SA->getOperand(1); 5056 } 5057 5058 // Peel off a cast operation 5059 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5060 CastOp = SCast->getSCEVType(); 5061 S = SCast->getOperand(); 5062 } 5063 5064 using namespace llvm::PatternMatch; 5065 5066 auto *SU = dyn_cast<SCEVUnknown>(S); 5067 const APInt *TrueVal, *FalseVal; 5068 if (!SU || 5069 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5070 m_APInt(FalseVal)))) { 5071 Condition = nullptr; 5072 return; 5073 } 5074 5075 TrueValue = *TrueVal; 5076 FalseValue = *FalseVal; 5077 5078 // Re-apply the cast we peeled off earlier 5079 if (CastOp.hasValue()) 5080 switch (*CastOp) { 5081 default: 5082 llvm_unreachable("Unknown SCEV cast type!"); 5083 5084 case scTruncate: 5085 TrueValue = TrueValue.trunc(BitWidth); 5086 FalseValue = FalseValue.trunc(BitWidth); 5087 break; 5088 case scZeroExtend: 5089 TrueValue = TrueValue.zext(BitWidth); 5090 FalseValue = FalseValue.zext(BitWidth); 5091 break; 5092 case scSignExtend: 5093 TrueValue = TrueValue.sext(BitWidth); 5094 FalseValue = FalseValue.sext(BitWidth); 5095 break; 5096 } 5097 5098 // Re-apply the constant offset we peeled off earlier 5099 TrueValue += Offset; 5100 FalseValue += Offset; 5101 } 5102 5103 bool isRecognized() { return Condition != nullptr; } 5104 }; 5105 5106 SelectPattern StartPattern(*this, BitWidth, Start); 5107 if (!StartPattern.isRecognized()) 5108 return ConstantRange(BitWidth, /* isFullSet = */ true); 5109 5110 SelectPattern StepPattern(*this, BitWidth, Step); 5111 if (!StepPattern.isRecognized()) 5112 return ConstantRange(BitWidth, /* isFullSet = */ true); 5113 5114 if (StartPattern.Condition != StepPattern.Condition) { 5115 // 
We don't handle this case today; but we could, by considering four 5116 // possibilities below instead of two. I'm not sure if there are cases where 5117 // that will help over what getRange already does, though. 5118 return ConstantRange(BitWidth, /* isFullSet = */ true); 5119 } 5120 5121 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5122 // construct arbitrary general SCEV expressions here. This function is called 5123 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5124 // say) can end up caching a suboptimal value. 5125 5126 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5127 // C2352 and C2512 (otherwise it isn't needed). 5128 5129 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5130 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5131 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5132 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5133 5134 ConstantRange TrueRange = 5135 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5136 ConstantRange FalseRange = 5137 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5138 5139 return TrueRange.unionWith(FalseRange); 5140 } 5141 5142 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5143 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5144 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5145 5146 // Return early if there are no flags to propagate to the SCEV. 5147 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5148 if (BinOp->hasNoUnsignedWrap()) 5149 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5150 if (BinOp->hasNoSignedWrap()) 5151 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5152 if (Flags == SCEV::FlagAnyWrap) 5153 return SCEV::FlagAnyWrap; 5154 5155 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5156 } 5157 5158 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5159 // Here we check that I is in the header of the innermost loop containing I, 5160 // since we only deal with instructions in the loop header. The actual loop we 5161 // need to check later will come from an add recurrence, but getting that 5162 // requires computing the SCEV of the operands, which can be expensive. This 5163 // check we can do cheaply to rule out some cases early. 5164 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5165 if (InnermostContainingLoop == nullptr || 5166 InnermostContainingLoop->getHeader() != I->getParent()) 5167 return false; 5168 5169 // Only proceed if we can prove that I does not yield poison. 5170 if (!programUndefinedIfFullPoison(I)) 5171 return false; 5172 5173 // At this point we know that if I is executed, then it does not wrap 5174 // according to at least one of NSW or NUW. If I is not executed, then we do 5175 // not know if the calculation that I represents would wrap. Multiple 5176 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5177 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5178 // derived from other instructions that map to the same SCEV. We cannot make 5179 // that guarantee for cases where I is not executed. So we need to find the 5180 // loop that I is considered in relation to and prove that I is executed for 5181 // every iteration of that loop. 
That implies that the value that I
5182 // calculates does not wrap anywhere in the loop, so then we can apply the
5183 // flags to the SCEV.
5184 //
5185 // We check isLoopInvariant to disambiguate in case we are adding recurrences
5186 // from different loops, so that we know which loop to prove that I is
5187 // executed in.
5188   for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
5189     // I could be an extractvalue from a call to an overflow intrinsic.
5190     // TODO: We can do better here in some cases.
5191     if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5192       return false;
5193     const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5194     if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5195       bool AllOtherOpsLoopInvariant = true;
5196       for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5197            ++OtherOpIndex) {
5198         if (OtherOpIndex != OpIndex) {
5199           const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5200           if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5201             AllOtherOpsLoopInvariant = false;
5202             break;
5203           }
5204         }
5205       }
5206       if (AllOtherOpsLoopInvariant &&
5207           isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5208         return true;
5209     }
5210   }
5211   return false;
5212 }
5213
5214 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5215   // If we know that \c I can never be poison, period, then that's enough.
5216   if (isSCEVExprNeverPoison(I))
5217     return true;
5218
5219   // For an add recurrence specifically, we assume that infinite loops without
5220   // side effects are undefined behavior, and then reason as follows:
5221   //
5222   // If the add recurrence is poison in any iteration, it is poison on all
5223   // future iterations (since incrementing poison yields poison). If the result
5224   // of the add recurrence is fed into the loop latch condition and the loop
5225   // does not contain any throws or exiting blocks other than the latch, we now
5226   // have the ability to "choose" whether the backedge is taken or not (by
5227   // choosing a sufficiently evil value for the poison feeding into the branch)
5228   // for every iteration including and after the one in which \p I first became
5229   // poison. There are two possibilities (call the iteration in which \p I
5230   // first becomes poison K):
5231   //
5232   // 1. In the set of iterations including and after K, the loop body executes
5233   //    no side effects. In this case executing the backedge an infinite
5234   //    number of times will yield undefined behavior.
5235   //
5236   // 2. In the set of iterations including and after K, the loop body executes
5237   //    at least one side effect. In this case, that specific instance of side
5238   //    effect is control dependent on poison, which also yields undefined
5239   //    behavior.
5240
5241   auto *ExitingBB = L->getExitingBlock();
5242   auto *LatchBB = L->getLoopLatch();
5243   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5244     return false;
5245
5246   SmallPtrSet<const Instruction *, 16> Pushed;
5247   SmallVector<const Instruction *, 8> PoisonStack;
5248
5249   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5250   // things that are known to be fully poison under that assumption go on the
5251   // PoisonStack.
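  // Illustrative (hypothetical IR): assuming %iv is poison, a user such as
  // "%add = add i32 %iv, 1" is itself fully poison and is pushed onto the
  // stack; if some pushed value ends up as the condition of the latch
  // branch, the walk below records that the latch is control dependent on
  // poison.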
5252 Pushed.insert(I); 5253 PoisonStack.push_back(I); 5254 5255 bool LatchControlDependentOnPoison = false; 5256 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5257 const Instruction *Poison = PoisonStack.pop_back_val(); 5258 5259 for (auto *PoisonUser : Poison->users()) { 5260 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5261 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5262 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5263 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5264 assert(BI->isConditional() && "Only possibility!"); 5265 if (BI->getParent() == LatchBB) { 5266 LatchControlDependentOnPoison = true; 5267 break; 5268 } 5269 } 5270 } 5271 } 5272 5273 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5274 } 5275 5276 ScalarEvolution::LoopProperties 5277 ScalarEvolution::getLoopProperties(const Loop *L) { 5278 typedef ScalarEvolution::LoopProperties LoopProperties; 5279 5280 auto Itr = LoopPropertiesCache.find(L); 5281 if (Itr == LoopPropertiesCache.end()) { 5282 auto HasSideEffects = [](Instruction *I) { 5283 if (auto *SI = dyn_cast<StoreInst>(I)) 5284 return !SI->isSimple(); 5285 5286 return I->mayHaveSideEffects(); 5287 }; 5288 5289 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5290 /*HasNoSideEffects*/ true}; 5291 5292 for (auto *BB : L->getBlocks()) 5293 for (auto &I : *BB) { 5294 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5295 LP.HasNoAbnormalExits = false; 5296 if (HasSideEffects(&I)) 5297 LP.HasNoSideEffects = false; 5298 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5299 break; // We're already as pessimistic as we can get. 5300 } 5301 5302 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5303 assert(InsertPair.second && "We just checked!"); 5304 Itr = InsertPair.first; 5305 } 5306 5307 return Itr->second; 5308 } 5309 5310 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5311 if (!isSCEVable(V->getType())) 5312 return getUnknown(V); 5313 5314 if (Instruction *I = dyn_cast<Instruction>(V)) { 5315 // Don't attempt to analyze instructions in blocks that aren't 5316 // reachable. Such instructions don't matter, and they aren't required 5317 // to obey basic rules for definitions dominating uses which this 5318 // analysis depends on. 5319 if (!DT.isReachableFromEntry(I->getParent())) 5320 return getUnknown(V); 5321 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5322 return getConstant(CI); 5323 else if (isa<ConstantPointerNull>(V)) 5324 return getZero(V->getType()); 5325 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5326 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5327 else if (!isa<ConstantExpr>(V)) 5328 return getUnknown(V); 5329 5330 Operator *U = cast<Operator>(V); 5331 if (auto BO = MatchBinaryOp(U, DT)) { 5332 switch (BO->Opcode) { 5333 case Instruction::Add: { 5334 // The simple thing to do would be to just call getSCEV on both operands 5335 // and call getAddExpr with the result. However if we're looking at a 5336 // bunch of things all added together, this can be quite inefficient, 5337 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5338 // Instead, gather up all the operands and make a single getAddExpr call. 5339 // LLVM IR canonical form means we need only traverse the left operands. 
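      // Illustrative: for "((a + b) + c) + d" the loop below pushes d, c, b
      // and finally a onto AddOps, then issues a single getAddExpr(AddOps)
      // call rather than three nested getAddExpr calls.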
5340 SmallVector<const SCEV *, 4> AddOps; 5341 do { 5342 if (BO->Op) { 5343 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5344 AddOps.push_back(OpSCEV); 5345 break; 5346 } 5347 5348 // If a NUW or NSW flag can be applied to the SCEV for this 5349 // addition, then compute the SCEV for this addition by itself 5350 // with a separate call to getAddExpr. We need to do that 5351 // instead of pushing the operands of the addition onto AddOps, 5352 // since the flags are only known to apply to this particular 5353 // addition - they may not apply to other additions that can be 5354 // formed with operands from AddOps. 5355 const SCEV *RHS = getSCEV(BO->RHS); 5356 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5357 if (Flags != SCEV::FlagAnyWrap) { 5358 const SCEV *LHS = getSCEV(BO->LHS); 5359 if (BO->Opcode == Instruction::Sub) 5360 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5361 else 5362 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5363 break; 5364 } 5365 } 5366 5367 if (BO->Opcode == Instruction::Sub) 5368 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5369 else 5370 AddOps.push_back(getSCEV(BO->RHS)); 5371 5372 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5373 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5374 NewBO->Opcode != Instruction::Sub)) { 5375 AddOps.push_back(getSCEV(BO->LHS)); 5376 break; 5377 } 5378 BO = NewBO; 5379 } while (true); 5380 5381 return getAddExpr(AddOps); 5382 } 5383 5384 case Instruction::Mul: { 5385 SmallVector<const SCEV *, 4> MulOps; 5386 do { 5387 if (BO->Op) { 5388 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5389 MulOps.push_back(OpSCEV); 5390 break; 5391 } 5392 5393 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5394 if (Flags != SCEV::FlagAnyWrap) { 5395 MulOps.push_back( 5396 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5397 break; 5398 } 5399 } 5400 5401 MulOps.push_back(getSCEV(BO->RHS)); 5402 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5403 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5404 MulOps.push_back(getSCEV(BO->LHS)); 5405 break; 5406 } 5407 BO = NewBO; 5408 } while (true); 5409 5410 return getMulExpr(MulOps); 5411 } 5412 case Instruction::UDiv: 5413 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5414 case Instruction::Sub: { 5415 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5416 if (BO->Op) 5417 Flags = getNoWrapFlagsFromUB(BO->Op); 5418 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5419 } 5420 case Instruction::And: 5421 // For an expression like x&255 that merely masks off the high bits, 5422 // use zext(trunc(x)) as the SCEV expression. 5423 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5424 if (CI->isZero()) 5425 return getSCEV(BO->RHS); 5426 if (CI->isMinusOne()) 5427 return getSCEV(BO->LHS); 5428 const APInt &A = CI->getValue(); 5429 5430 // Instcombine's ShrinkDemandedConstant may strip bits out of 5431 // constants, obscuring what would otherwise be a low-bits mask. 5432 // Use computeKnownBits to compute what ShrinkDemandedConstant 5433 // knew about to reconstruct a low-bits mask value. 
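        // Illustrative: for A = 0xF0 on i8 (LZ == 0, TZ == 4), EffectiveMask
        // below is 0xF0; if all unknown bits of the LHS lie under A, the
        // 'and' is rewritten as zext(trunc(LHS / 16)) * 16.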
5434 unsigned LZ = A.countLeadingZeros(); 5435 unsigned TZ = A.countTrailingZeros(); 5436 unsigned BitWidth = A.getBitWidth(); 5437 KnownBits Known(BitWidth); 5438 computeKnownBits(BO->LHS, Known, getDataLayout(), 5439 0, &AC, nullptr, &DT); 5440 5441 APInt EffectiveMask = 5442 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5443 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5444 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5445 const SCEV *LHS = getSCEV(BO->LHS); 5446 const SCEV *ShiftedLHS = nullptr; 5447 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5448 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5449 // For an expression like (x * 8) & 8, simplify the multiply. 5450 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5451 unsigned GCD = std::min(MulZeros, TZ); 5452 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5453 SmallVector<const SCEV*, 4> MulOps; 5454 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5455 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5456 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5457 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5458 } 5459 } 5460 if (!ShiftedLHS) 5461 ShiftedLHS = getUDivExpr(LHS, MulCount); 5462 return getMulExpr( 5463 getZeroExtendExpr( 5464 getTruncateExpr(ShiftedLHS, 5465 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5466 BO->LHS->getType()), 5467 MulCount); 5468 } 5469 } 5470 break; 5471 5472 case Instruction::Or: 5473 // If the RHS of the Or is a constant, we may have something like: 5474 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5475 // optimizations will transparently handle this case. 5476 // 5477 // In order for this transformation to be safe, the LHS must be of the 5478 // form X*(2^n) and the Or constant must be less than 2^n. 5479 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5480 const SCEV *LHS = getSCEV(BO->LHS); 5481 const APInt &CIVal = CI->getValue(); 5482 if (GetMinTrailingZeros(LHS) >= 5483 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5484 // Build a plain add SCEV. 5485 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5486 // If the LHS of the add was an addrec and it has no-wrap flags, 5487 // transfer the no-wrap flags, since an or won't introduce a wrap. 5488 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5489 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5490 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5491 OldAR->getNoWrapFlags()); 5492 } 5493 return S; 5494 } 5495 } 5496 break; 5497 5498 case Instruction::Xor: 5499 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5500 // If the RHS of xor is -1, then this is a not operation. 5501 if (CI->isMinusOne()) 5502 return getNotSCEV(getSCEV(BO->LHS)); 5503 5504 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5505 // This is a variant of the check for xor with -1, and it handles 5506 // the case where instcombine has trimmed non-demanded bits out 5507 // of an xor with -1. 
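    // Illustrative: with C == 7 (a low-bits mask), "xor (and x, 7), 7" flips
    // exactly the low three bits and so equals "and (not x), 7"; the checks
    // below recognize this shape when the 'and' shows up as a zero extension
    // of a narrower value.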
5508 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5509 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5510 if (LBO->getOpcode() == Instruction::And && 5511 LCI->getValue() == CI->getValue()) 5512 if (const SCEVZeroExtendExpr *Z = 5513 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5514 Type *UTy = BO->LHS->getType(); 5515 const SCEV *Z0 = Z->getOperand(); 5516 Type *Z0Ty = Z0->getType(); 5517 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5518 5519 // If C is a low-bits mask, the zero extend is serving to 5520 // mask off the high bits. Complement the operand and 5521 // re-apply the zext. 5522 if (CI->getValue().isMask(Z0TySize)) 5523 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5524 5525 // If C is a single bit, it may be in the sign-bit position 5526 // before the zero-extend. In this case, represent the xor 5527 // using an add, which is equivalent, and re-apply the zext. 5528 APInt Trunc = CI->getValue().trunc(Z0TySize); 5529 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5530 Trunc.isSignMask()) 5531 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5532 UTy); 5533 } 5534 } 5535 break; 5536 5537 case Instruction::Shl: 5538 // Turn shift left of a constant amount into a multiply. 5539 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5540 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5541 5542 // If the shift count is not less than the bitwidth, the result of 5543 // the shift is undefined. Don't try to analyze it, because the 5544 // resolution chosen here may differ from the resolution chosen in 5545 // other parts of the compiler. 5546 if (SA->getValue().uge(BitWidth)) 5547 break; 5548 5549 // It is currently not resolved how to interpret NSW for left 5550 // shift by BitWidth - 1, so we avoid applying flags in that 5551 // case. Remove this check (or this comment) once the situation 5552 // is resolved. See 5553 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5554 // and http://reviews.llvm.org/D8890 . 5555 auto Flags = SCEV::FlagAnyWrap; 5556 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5557 Flags = getNoWrapFlagsFromUB(BO->Op); 5558 5559 Constant *X = ConstantInt::get(getContext(), 5560 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5561 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5562 } 5563 break; 5564 5565 case Instruction::AShr: 5566 // AShr X, C, where C is a constant. 5567 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 5568 if (!CI) 5569 break; 5570 5571 Type *OuterTy = BO->LHS->getType(); 5572 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 5573 // If the shift count is not less than the bitwidth, the result of 5574 // the shift is undefined. Don't try to analyze it, because the 5575 // resolution chosen here may differ from the resolution chosen in 5576 // other parts of the compiler. 5577 if (CI->getValue().uge(BitWidth)) 5578 break; 5579 5580 if (CI->isZero()) 5581 return getSCEV(BO->LHS); // shift by zero --> noop 5582 5583 uint64_t AShrAmt = CI->getZExtValue(); 5584 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 5585 5586 Operator *L = dyn_cast<Operator>(BO->LHS); 5587 if (L && L->getOpcode() == Instruction::Shl) { 5588 // X = Shl A, n 5589 // Y = AShr X, m 5590 // Both n and m are constant. 5591 5592 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 5593 if (L->getOperand(1) == BO->RHS) 5594 // For a two-shift sext-inreg, i.e. n = m, 5595 // use sext(trunc(x)) as the SCEV expression. 
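        // (Illustrative: on i32, "(x shl 24) ashr 24" sign-extends the low
        // byte, i.e. it is sext(trunc x to i8) to i32.)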
5596         return getSignExtendExpr(
5597             getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
5598
5599       ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
5600       if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
5601         uint64_t ShlAmt = ShlAmtCI->getZExtValue();
5602         if (ShlAmt > AShrAmt) {
5603           // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
5604           // expression. We already checked that ShlAmt < BitWidth, so
5605           // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
5606           // ShlAmt - AShrAmt < BitWidth - AShrAmt.
5607           APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
5608                                           ShlAmt - AShrAmt);
5609           return getSignExtendExpr(
5610               getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
5611                          getConstant(Mul)), OuterTy);
5612         }
5613       }
5614     }
5615     break;
5616     }
5617   }
5618
5619   switch (U->getOpcode()) {
5620   case Instruction::Trunc:
5621     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
5622
5623   case Instruction::ZExt:
5624     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5625
5626   case Instruction::SExt:
5627     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
5628
5629   case Instruction::BitCast:
5630     // BitCasts are no-op casts so we just eliminate the cast.
5631     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
5632       return getSCEV(U->getOperand(0));
5633     break;
5634
5635   // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
5636   // lead to pointer expressions which cannot safely be expanded to GEPs,
5637   // because ScalarEvolution doesn't respect the GEP aliasing rules when
5638   // simplifying integer expressions.
5639
5640   case Instruction::GetElementPtr:
5641     return createNodeForGEP(cast<GEPOperator>(U));
5642
5643   case Instruction::PHI:
5644     return createNodeForPHI(cast<PHINode>(U));
5645
5646   case Instruction::Select:
5647     // U can also be a select constant expr, which we let fall through. Since
5648     // createNodeForSelectOrPHI only works for a condition that is an `ICmpInst`,
5649     // and constant expressions cannot have instructions as operands, we'd have
5650     // returned getUnknown for a select constant expression anyway.
5651     if (isa<Instruction>(U))
5652       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
5653                                       U->getOperand(1), U->getOperand(2));
5654     break;
5655
5656   case Instruction::Call:
5657   case Instruction::Invoke:
5658     if (Value *RV = CallSite(U).getReturnedArgOperand())
5659       return getSCEV(RV);
5660     break;
5661   }
5662
5663   return getUnknown(V);
5664 }
5665
5666
5667
5668 //===----------------------------------------------------------------------===//
5669 //                   Iteration Count Computation Code
5670 //
5671
5672 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
5673   if (!ExitCount)
5674     return 0;
5675
5676   ConstantInt *ExitConst = ExitCount->getValue();
5677
5678   // Guard against huge trip counts.
5679   if (ExitConst->getValue().getActiveBits() > 32)
5680     return 0;
5681
5682   // In case of integer overflow, this returns 0, which is correct.
5683   return ((unsigned)ExitConst->getZExtValue()) + 1;
5684 }
5685
5686 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
5687   if (BasicBlock *ExitingBB = L->getExitingBlock())
5688     return getSmallConstantTripCount(L, ExitingBB);
5689
5690   // No trip count information for multiple exits.
5691   return 0;
5692 }
5693
5694 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
5695                                                     BasicBlock *ExitingBlock) {
5696   assert(ExitingBlock && "Must pass a non-null exiting block!");
5697   assert(L->isLoopExiting(ExitingBlock) &&
5698          "Exiting block must actually branch out of the loop!");
5699   const SCEVConstant *ExitCount =
5700       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
5701   return getConstantTripCount(ExitCount);
5702 }
5703
5704 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
5705   const auto *MaxExitCount =
5706       dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
5707   return getConstantTripCount(MaxExitCount);
5708 }
5709
5710 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
5711   if (BasicBlock *ExitingBB = L->getExitingBlock())
5712     return getSmallConstantTripMultiple(L, ExitingBB);
5713
5714   // No trip multiple information for multiple exits.
5715   return 0;
5716 }
5717
5718 /// Returns the largest constant divisor of the trip count of this loop as a
5719 /// normal unsigned value, if possible. This means that the actual trip count
5720 /// is always a multiple of the returned value (don't forget the trip count
5721 /// could very well be zero as well!).
5722 ///
5723 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
5724 /// of a constant (which is also the case if the trip count is simply a
5725 /// constant; use getSmallConstantTripCount for that case). It will also
5726 /// return 1 if the trip count is very large (>= 2^32).
5727 ///
5728 /// As explained in the comments for getSmallConstantTripCount, this assumes
5729 /// that control exits the loop via ExitingBlock.
5730 unsigned
5731 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
5732                                               BasicBlock *ExitingBlock) {
5733   assert(ExitingBlock && "Must pass a non-null exiting block!");
5734   assert(L->isLoopExiting(ExitingBlock) &&
5735          "Exiting block must actually branch out of the loop!");
5736   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
5737   if (ExitCount == getCouldNotCompute())
5738     return 1;
5739
5740   // Get the trip count from the BE count by adding 1.
5741   const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
5742
5743   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
5744   if (!TC)
5745     // Attempt to factor more general cases. Returns the greatest power of
5746     // two divisor. If overflow happens, the trip count expression is still
5747     // divisible by the greatest power of 2 divisor returned.
5748     return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
5749
5750   ConstantInt *Result = TC->getValue();
5751
5752   // Guard against huge trip counts (this requires checking
5753   // for zero to handle the case where the trip count == -1 and the
5754   // addition wraps).
5755   if (!Result || Result->getValue().getActiveBits() > 32 ||
5756       Result->getValue().getActiveBits() == 0)
5757     return 1;
5758
5759   return (unsigned)Result->getZExtValue();
5760 }
5761
5762 /// Get the expression for the number of loop iterations for which this loop
5763 /// is guaranteed not to exit via ExitingBlock. Otherwise return
5764 /// SCEVCouldNotCompute.
5765 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 5766 BasicBlock *ExitingBlock) { 5767 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 5768 } 5769 5770 const SCEV * 5771 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 5772 SCEVUnionPredicate &Preds) { 5773 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 5774 } 5775 5776 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 5777 return getBackedgeTakenInfo(L).getExact(this); 5778 } 5779 5780 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 5781 /// known never to be less than the actual backedge taken count. 5782 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 5783 return getBackedgeTakenInfo(L).getMax(this); 5784 } 5785 5786 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 5787 return getBackedgeTakenInfo(L).isMaxOrZero(this); 5788 } 5789 5790 /// Push PHI nodes in the header of the given loop onto the given Worklist. 5791 static void 5792 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 5793 BasicBlock *Header = L->getHeader(); 5794 5795 // Push all Loop-header PHIs onto the Worklist stack. 5796 for (BasicBlock::iterator I = Header->begin(); 5797 PHINode *PN = dyn_cast<PHINode>(I); ++I) 5798 Worklist.push_back(PN); 5799 } 5800 5801 const ScalarEvolution::BackedgeTakenInfo & 5802 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 5803 auto &BTI = getBackedgeTakenInfo(L); 5804 if (BTI.hasFullInfo()) 5805 return BTI; 5806 5807 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 5808 5809 if (!Pair.second) 5810 return Pair.first->second; 5811 5812 BackedgeTakenInfo Result = 5813 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 5814 5815 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 5816 } 5817 5818 const ScalarEvolution::BackedgeTakenInfo & 5819 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 5820 // Initially insert an invalid entry for this loop. If the insertion 5821 // succeeds, proceed to actually compute a backedge-taken count and 5822 // update the value. The temporary CouldNotCompute value tells SCEV 5823 // code elsewhere that it shouldn't attempt to request a new 5824 // backedge-taken count, which could result in infinite recursion. 5825 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 5826 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 5827 if (!Pair.second) 5828 return Pair.first->second; 5829 5830 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 5831 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 5832 // must be cleared in this scope. 5833 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 5834 5835 if (Result.getExact(this) != getCouldNotCompute()) { 5836 assert(isLoopInvariant(Result.getExact(this), L) && 5837 isLoopInvariant(Result.getMax(this), L) && 5838 "Computed backedge-taken count isn't loop invariant for loop!"); 5839 ++NumTripCountsComputed; 5840 } 5841 else if (Result.getMax(this) == getCouldNotCompute() && 5842 isa<PHINode>(L->getHeader()->begin())) { 5843 // Only count loops that have phi nodes as not being computable. 
5844     ++NumTripCountsNotComputed;
5845   }
5846
5847   // Now that we know more about the trip count for this loop, forget any
5848   // existing SCEV values for PHI nodes in this loop since they are only
5849   // conservative estimates made without the benefit of trip count
5850   // information. This is similar to the code in forgetLoop, except that
5851   // it handles SCEVUnknown PHI nodes specially.
5852   if (Result.hasAnyInfo()) {
5853     SmallVector<Instruction *, 16> Worklist;
5854     PushLoopPHIs(L, Worklist);
5855
5856     SmallPtrSet<Instruction *, 8> Visited;
5857     while (!Worklist.empty()) {
5858       Instruction *I = Worklist.pop_back_val();
5859       if (!Visited.insert(I).second)
5860         continue;
5861
5862       ValueExprMapType::iterator It =
5863           ValueExprMap.find_as(static_cast<Value *>(I));
5864       if (It != ValueExprMap.end()) {
5865         const SCEV *Old = It->second;
5866
5867         // SCEVUnknown for a PHI either means that it has an unrecognized
5868         // structure, or it's a PHI that's in the process of being computed
5869         // by createNodeForPHI.  In the former case, additional loop trip
5870         // count information isn't going to change anything. In the latter
5871         // case, createNodeForPHI will perform the necessary updates on its
5872         // own when it gets to that point.
5873         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
5874           eraseValueFromMap(It->first);
5875           forgetMemoizedResults(Old);
5876         }
5877         if (PHINode *PN = dyn_cast<PHINode>(I))
5878           ConstantEvolutionLoopExitValue.erase(PN);
5879       }
5880
5881       PushDefUseChildren(I, Worklist);
5882     }
5883   }
5884
5885   // Re-lookup the insert position, since the call to
5886   // computeBackedgeTakenCount above could result in a
5887   // recursive call to getBackedgeTakenInfo (on a different
5888   // loop), which would invalidate the iterator computed
5889   // earlier.
5890   return BackedgeTakenCounts.find(L)->second = std::move(Result);
5891 }
5892
5893 void ScalarEvolution::forgetLoop(const Loop *L) {
5894   // Drop any stored trip count value.
5895   auto RemoveLoopFromBackedgeMap =
5896       [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
5897         auto BTCPos = Map.find(L);
5898         if (BTCPos != Map.end()) {
5899           BTCPos->second.clear();
5900           Map.erase(BTCPos);
5901         }
5902       };
5903
5904   RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
5905   RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);
5906
5907   // Drop information about expressions based on loop-header PHIs.
5908   SmallVector<Instruction *, 16> Worklist;
5909   PushLoopPHIs(L, Worklist);
5910
5911   SmallPtrSet<Instruction *, 8> Visited;
5912   while (!Worklist.empty()) {
5913     Instruction *I = Worklist.pop_back_val();
5914     if (!Visited.insert(I).second)
5915       continue;
5916
5917     ValueExprMapType::iterator It =
5918         ValueExprMap.find_as(static_cast<Value *>(I));
5919     if (It != ValueExprMap.end()) {
5920       eraseValueFromMap(It->first);
5921       forgetMemoizedResults(It->second);
5922       if (PHINode *PN = dyn_cast<PHINode>(I))
5923         ConstantEvolutionLoopExitValue.erase(PN);
5924     }
5925
5926     PushDefUseChildren(I, Worklist);
5927   }
5928
5929   // Forget all contained loops too, to avoid dangling entries in the
5930   // ValuesAtScopes map.
5931   for (Loop *I : *L)
5932     forgetLoop(I);
5933
5934   LoopPropertiesCache.erase(L);
5935 }
5936
5937 void ScalarEvolution::forgetValue(Value *V) {
5938   Instruction *I = dyn_cast<Instruction>(V);
5939   if (!I) return;
5940
5941   // Drop information about expressions based on loop-header PHIs.
5942   SmallVector<Instruction *, 16> Worklist;
5943   Worklist.push_back(I);
5944
5945   SmallPtrSet<Instruction *, 8> Visited;
5946   while (!Worklist.empty()) {
5947     I = Worklist.pop_back_val();
5948     if (!Visited.insert(I).second)
5949       continue;
5950
5951     ValueExprMapType::iterator It =
5952         ValueExprMap.find_as(static_cast<Value *>(I));
5953     if (It != ValueExprMap.end()) {
5954       eraseValueFromMap(It->first);
5955       forgetMemoizedResults(It->second);
5956       if (PHINode *PN = dyn_cast<PHINode>(I))
5957         ConstantEvolutionLoopExitValue.erase(PN);
5958     }
5959
5960     PushDefUseChildren(I, Worklist);
5961   }
5962 }
5963
5964 /// Get the exact loop backedge taken count considering all loop exits. A
5965 /// computable result can only be returned for loops with a single exit.
5966 /// Returning the minimum taken count among all exits is incorrect because one
5967 /// of the loop's exit limits may have been skipped. howFarToZero assumes that
5968 /// the limit of each loop test is never skipped. This is a valid assumption as
5969 /// long as the loop exits via that test. For precise results, it is the
5970 /// caller's responsibility to specify the relevant loop exit using
5971 /// getExact(ExitingBlock, SE).
5972 const SCEV *
5973 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
5974                                              SCEVUnionPredicate *Preds) const {
5975   // If any exits were not computable, the loop is not computable.
5976   if (!isComplete() || ExitNotTaken.empty())
5977     return SE->getCouldNotCompute();
5978
5979   const SCEV *BECount = nullptr;
5980   for (auto &ENT : ExitNotTaken) {
5981     assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
5982
5983     if (!BECount)
5984       BECount = ENT.ExactNotTaken;
5985     else if (BECount != ENT.ExactNotTaken)
5986       return SE->getCouldNotCompute();
5987     if (Preds && !ENT.hasAlwaysTruePredicate())
5988       Preds->add(ENT.Predicate.get());
5989
5990     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
5991            "Predicate should be always true!");
5992   }
5993
5994   assert(BECount && "Invalid not taken count for loop exit");
5995   return BECount;
5996 }
5997
5998 /// Get the exact not taken count for this loop exit.
5999 const SCEV *
6000 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
6001                                              ScalarEvolution *SE) const {
6002   for (auto &ENT : ExitNotTaken)
6003     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
6004       return ENT.ExactNotTaken;
6005
6006   return SE->getCouldNotCompute();
6007 }
6008
6009 /// getMax - Get the max backedge taken count for the loop.
6010 const SCEV * 6011 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6012 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6013 return !ENT.hasAlwaysTruePredicate(); 6014 }; 6015 6016 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6017 return SE->getCouldNotCompute(); 6018 6019 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6020 "No point in having a non-constant max backedge taken count!"); 6021 return getMax(); 6022 } 6023 6024 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6025 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6026 return !ENT.hasAlwaysTruePredicate(); 6027 }; 6028 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6029 } 6030 6031 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6032 ScalarEvolution *SE) const { 6033 if (getMax() && getMax() != SE->getCouldNotCompute() && 6034 SE->hasOperand(getMax(), S)) 6035 return true; 6036 6037 for (auto &ENT : ExitNotTaken) 6038 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6039 SE->hasOperand(ENT.ExactNotTaken, S)) 6040 return true; 6041 6042 return false; 6043 } 6044 6045 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6046 : ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) { 6047 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6048 isa<SCEVConstant>(MaxNotTaken)) && 6049 "No point in having a non-constant max backedge taken count!"); 6050 } 6051 6052 ScalarEvolution::ExitLimit::ExitLimit( 6053 const SCEV *E, const SCEV *M, bool MaxOrZero, 6054 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6055 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6056 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6057 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6058 "Exact is not allowed to be less precise than Max"); 6059 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6060 isa<SCEVConstant>(MaxNotTaken)) && 6061 "No point in having a non-constant max backedge taken count!"); 6062 for (auto *PredSet : PredSetList) 6063 for (auto *P : *PredSet) 6064 addPredicate(P); 6065 } 6066 6067 ScalarEvolution::ExitLimit::ExitLimit( 6068 const SCEV *E, const SCEV *M, bool MaxOrZero, 6069 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6070 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6071 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6072 isa<SCEVConstant>(MaxNotTaken)) && 6073 "No point in having a non-constant max backedge taken count!"); 6074 } 6075 6076 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6077 bool MaxOrZero) 6078 : ExitLimit(E, M, MaxOrZero, None) { 6079 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6080 isa<SCEVConstant>(MaxNotTaken)) && 6081 "No point in having a non-constant max backedge taken count!"); 6082 } 6083 6084 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6085 /// computable exit into a persistent ExitNotTakenInfo array. 
6086 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6087 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6088 &&ExitCounts, 6089 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6090 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6091 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 6092 ExitNotTaken.reserve(ExitCounts.size()); 6093 std::transform( 6094 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6095 [&](const EdgeExitInfo &EEI) { 6096 BasicBlock *ExitBB = EEI.first; 6097 const ExitLimit &EL = EEI.second; 6098 if (EL.Predicates.empty()) 6099 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6100 6101 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6102 for (auto *Pred : EL.Predicates) 6103 Predicate->add(Pred); 6104 6105 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6106 }); 6107 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6108 "No point in having a non-constant max backedge taken count!"); 6109 } 6110 6111 /// Invalidate this result and free the ExitNotTakenInfo array. 6112 void ScalarEvolution::BackedgeTakenInfo::clear() { 6113 ExitNotTaken.clear(); 6114 } 6115 6116 /// Compute the number of times the backedge of the specified loop will execute. 6117 ScalarEvolution::BackedgeTakenInfo 6118 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6119 bool AllowPredicates) { 6120 SmallVector<BasicBlock *, 8> ExitingBlocks; 6121 L->getExitingBlocks(ExitingBlocks); 6122 6123 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 6124 6125 SmallVector<EdgeExitInfo, 4> ExitCounts; 6126 bool CouldComputeBECount = true; 6127 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6128 const SCEV *MustExitMaxBECount = nullptr; 6129 const SCEV *MayExitMaxBECount = nullptr; 6130 bool MustExitMaxOrZero = false; 6131 6132 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6133 // and compute maxBECount. 6134 // Do a union of all the predicates here. 6135 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6136 BasicBlock *ExitBB = ExitingBlocks[i]; 6137 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6138 6139 assert((AllowPredicates || EL.Predicates.empty()) && 6140 "Predicated exit limit when predicates are not allowed!"); 6141 6142 // 1. For each exit that can be computed, add an entry to ExitCounts. 6143 // CouldComputeBECount is true only if all exits can be computed. 6144 if (EL.ExactNotTaken == getCouldNotCompute()) 6145 // We couldn't compute an exact value for this exit, so 6146 // we won't be able to compute an exact value for the loop. 6147 CouldComputeBECount = false; 6148 else 6149 ExitCounts.emplace_back(ExitBB, EL); 6150 6151 // 2. Derive the loop's MaxBECount from each exit's max number of 6152 // non-exiting iterations. Partition the loop exits into two kinds: 6153 // LoopMustExits and LoopMayExits. 6154 // 6155 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it 6156 // is a LoopMayExit. If any computable LoopMustExit is found, then 6157 // MaxBECount is the minimum EL.MaxNotTaken of computable 6158 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6159 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6160 // computable EL.MaxNotTaken.
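// (Editorial example, hypothetical values: suppose the loop has two
// exiting blocks A and B, where A dominates the latch with
// EL.MaxNotTaken = 10 and B does not dominate the latch with
// EL.MaxNotTaken = 3. A is a LoopMustExit whose test runs on every
// complete iteration, so MaxBECount = 10; B's tighter bound cannot be
// used because control may bypass B's test entirely on some iterations.)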
6161 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6162 DT.dominates(ExitBB, Latch)) { 6163 if (!MustExitMaxBECount) { 6164 MustExitMaxBECount = EL.MaxNotTaken; 6165 MustExitMaxOrZero = EL.MaxOrZero; 6166 } else { 6167 MustExitMaxBECount = 6168 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6169 } 6170 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6171 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6172 MayExitMaxBECount = EL.MaxNotTaken; 6173 else { 6174 MayExitMaxBECount = 6175 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6176 } 6177 } 6178 } 6179 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6180 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6181 // The loop backedge will be taken the maximum or zero times if there's 6182 // a single exit that must be taken the maximum or zero times. 6183 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6184 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6185 MaxBECount, MaxOrZero); 6186 } 6187 6188 ScalarEvolution::ExitLimit 6189 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6190 bool AllowPredicates) { 6191 6192 // Okay, we've chosen an exiting block. See what condition causes us to exit 6193 // at this block and remember the exit block and whether all other targets 6194 // lead to the loop header. 6195 bool MustExecuteLoopHeader = true; 6196 BasicBlock *Exit = nullptr; 6197 for (auto *SBB : successors(ExitingBlock)) 6198 if (!L->contains(SBB)) { 6199 if (Exit) // Multiple exit successors. 6200 return getCouldNotCompute(); 6201 Exit = SBB; 6202 } else if (SBB != L->getHeader()) { 6203 MustExecuteLoopHeader = false; 6204 } 6205 6206 // At this point, we know we have a conditional branch that determines whether 6207 // the loop is exited. However, we don't know if the branch is executed each 6208 // time through the loop. If not, then the execution count of the branch will 6209 // not be equal to the trip count of the loop. 6210 // 6211 // Currently we check for this by checking to see if the Exit branch goes to 6212 // the loop header. If so, we know it will always execute the same number of 6213 // times as the loop. We also handle the case where the exit block *is* the 6214 // loop header. This is common for un-rotated loops. 6215 // 6216 // If both of those tests fail, walk up the unique predecessor chain to the 6217 // header, stopping if there is an edge that doesn't exit the loop. If the 6218 // header is reached, the execution count of the branch will be equal to the 6219 // trip count of the loop. 6220 // 6221 // More extensive analysis could be done to handle more cases here. 6222 // 6223 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6224 // The simple checks failed, try climbing the unique predecessor chain 6225 // up to the header. 6226 bool Ok = false; 6227 for (BasicBlock *BB = ExitingBlock; BB; ) { 6228 BasicBlock *Pred = BB->getUniquePredecessor(); 6229 if (!Pred) 6230 return getCouldNotCompute(); 6231 TerminatorInst *PredTerm = Pred->getTerminator(); 6232 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6233 if (PredSucc == BB) 6234 continue; 6235 // If the predecessor has a successor that isn't BB and isn't 6236 // outside the loop, assume the worst. 
6237 if (L->contains(PredSucc)) 6238 return getCouldNotCompute(); 6239 } 6240 if (Pred == L->getHeader()) { 6241 Ok = true; 6242 break; 6243 } 6244 BB = Pred; 6245 } 6246 if (!Ok) 6247 return getCouldNotCompute(); 6248 } 6249 6250 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6251 TerminatorInst *Term = ExitingBlock->getTerminator(); 6252 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6253 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6254 // Proceed to the next level to examine the exit condition expression. 6255 return computeExitLimitFromCond( 6256 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6257 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6258 } 6259 6260 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6261 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6262 /*ControlsExit=*/IsOnlyExit); 6263 6264 return getCouldNotCompute(); 6265 } 6266 6267 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6268 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6269 bool ControlsExit, bool AllowPredicates) { 6270 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6271 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6272 ControlsExit, AllowPredicates); 6273 } 6274 6275 Optional<ScalarEvolution::ExitLimit> 6276 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6277 BasicBlock *TBB, BasicBlock *FBB, 6278 bool ControlsExit, bool AllowPredicates) { 6279 (void)this->L; 6280 (void)this->TBB; 6281 (void)this->FBB; 6282 (void)this->AllowPredicates; 6283 6284 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6285 this->AllowPredicates == AllowPredicates && 6286 "Variance in assumed invariant key components!"); 6287 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6288 if (Itr == TripCountMap.end()) 6289 return None; 6290 return Itr->second; 6291 } 6292 6293 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6294 BasicBlock *TBB, BasicBlock *FBB, 6295 bool ControlsExit, 6296 bool AllowPredicates, 6297 const ExitLimit &EL) { 6298 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6299 this->AllowPredicates == AllowPredicates && 6300 "Variance in assumed invariant key components!"); 6301 6302 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6303 assert(InsertResult.second && "Expected successful insertion!"); 6304 (void)InsertResult; 6305 } 6306 6307 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6308 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6309 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6310 6311 if (auto MaybeEL = 6312 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6313 return *MaybeEL; 6314 6315 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6316 ControlsExit, AllowPredicates); 6317 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6318 return EL; 6319 } 6320 6321 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6322 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6323 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6324 // Check if the controlling expression for this loop is an And or Or. 6325 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6326 if (BO->getOpcode() == Instruction::And) { 6327 // Recurse on the operands of the and. 
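// (Editorial note: EitherMayExit is true when the true edge stays in the
// loop, i.e. the branch exits on a false condition. An 'and' is false as
// soon as either operand is false, so either operand alone may end the
// loop and neither operand fully controls the exit -- hence the
// 'ControlsExit && !EitherMayExit' arguments passed below.)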
6328 bool EitherMayExit = L->contains(TBB); 6329 ExitLimit EL0 = computeExitLimitFromCondCached( 6330 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6331 AllowPredicates); 6332 ExitLimit EL1 = computeExitLimitFromCondCached( 6333 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6334 AllowPredicates); 6335 const SCEV *BECount = getCouldNotCompute(); 6336 const SCEV *MaxBECount = getCouldNotCompute(); 6337 if (EitherMayExit) { 6338 // Both conditions must be true for the loop to continue executing. 6339 // Choose the less conservative count. 6340 if (EL0.ExactNotTaken == getCouldNotCompute() || 6341 EL1.ExactNotTaken == getCouldNotCompute()) 6342 BECount = getCouldNotCompute(); 6343 else 6344 BECount = 6345 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6346 if (EL0.MaxNotTaken == getCouldNotCompute()) 6347 MaxBECount = EL1.MaxNotTaken; 6348 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6349 MaxBECount = EL0.MaxNotTaken; 6350 else 6351 MaxBECount = 6352 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6353 } else { 6354 // Both conditions must be true at the same time for the loop to exit. 6355 // For now, be conservative. 6356 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6357 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6358 MaxBECount = EL0.MaxNotTaken; 6359 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6360 BECount = EL0.ExactNotTaken; 6361 } 6362 6363 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6364 // to be more aggressive when computing BECount than when computing 6365 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6366 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6367 // to not. 6368 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6369 !isa<SCEVCouldNotCompute>(BECount)) 6370 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6371 6372 return ExitLimit(BECount, MaxBECount, false, 6373 {&EL0.Predicates, &EL1.Predicates}); 6374 } 6375 if (BO->getOpcode() == Instruction::Or) { 6376 // Recurse on the operands of the or. 6377 bool EitherMayExit = L->contains(FBB); 6378 ExitLimit EL0 = computeExitLimitFromCondCached( 6379 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6380 AllowPredicates); 6381 ExitLimit EL1 = computeExitLimitFromCondCached( 6382 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6383 AllowPredicates); 6384 const SCEV *BECount = getCouldNotCompute(); 6385 const SCEV *MaxBECount = getCouldNotCompute(); 6386 if (EitherMayExit) { 6387 // Both conditions must be false for the loop to continue executing. 6388 // Choose the less conservative count. 6389 if (EL0.ExactNotTaken == getCouldNotCompute() || 6390 EL1.ExactNotTaken == getCouldNotCompute()) 6391 BECount = getCouldNotCompute(); 6392 else 6393 BECount = 6394 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6395 if (EL0.MaxNotTaken == getCouldNotCompute()) 6396 MaxBECount = EL1.MaxNotTaken; 6397 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6398 MaxBECount = EL0.MaxNotTaken; 6399 else 6400 MaxBECount = 6401 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6402 } else { 6403 // Both conditions must be false at the same time for the loop to exit. 6404 // For now, be conservative. 
6405 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 6406 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6407 MaxBECount = EL0.MaxNotTaken; 6408 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6409 BECount = EL0.ExactNotTaken; 6410 } 6411 6412 return ExitLimit(BECount, MaxBECount, false, 6413 {&EL0.Predicates, &EL1.Predicates}); 6414 } 6415 } 6416 6417 // With an icmp, it may be feasible to compute an exact backedge-taken count. 6418 // Proceed to the next level to examine the icmp. 6419 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 6420 ExitLimit EL = 6421 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 6422 if (EL.hasFullInfo() || !AllowPredicates) 6423 return EL; 6424 6425 // Try again, but use SCEV predicates this time. 6426 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 6427 /*AllowPredicates=*/true); 6428 } 6429 6430 // Check for a constant condition. These are normally stripped out by 6431 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 6432 // preserve the CFG and is temporarily leaving constant conditions 6433 // in place. 6434 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 6435 if (L->contains(FBB) == !CI->getZExtValue()) 6436 // The backedge is always taken. 6437 return getCouldNotCompute(); 6438 else 6439 // The backedge is never taken. 6440 return getZero(CI->getType()); 6441 } 6442 6443 // If it's not an integer or pointer comparison then compute it the hard way. 6444 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6445 } 6446 6447 ScalarEvolution::ExitLimit 6448 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 6449 ICmpInst *ExitCond, 6450 BasicBlock *TBB, 6451 BasicBlock *FBB, 6452 bool ControlsExit, 6453 bool AllowPredicates) { 6454 6455 // If the condition was exit on true, convert the condition to exit on false 6456 ICmpInst::Predicate Cond; 6457 if (!L->contains(FBB)) 6458 Cond = ExitCond->getPredicate(); 6459 else 6460 Cond = ExitCond->getInversePredicate(); 6461 6462 // Handle common loops like: for (X = "string"; *X; ++X) 6463 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 6464 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 6465 ExitLimit ItCnt = 6466 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 6467 if (ItCnt.hasAnyInfo()) 6468 return ItCnt; 6469 } 6470 6471 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 6472 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 6473 6474 // Try to evaluate any dependencies out of the loop. 6475 LHS = getSCEVAtScope(LHS, L); 6476 RHS = getSCEVAtScope(RHS, L); 6477 6478 // At this point, we would like to compute how many iterations of the 6479 // loop the predicate will return true for these inputs. 6480 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 6481 // If there is a loop-invariant, force it into the RHS. 6482 std::swap(LHS, RHS); 6483 Cond = ICmpInst::getSwappedPredicate(Cond); 6484 } 6485 6486 // Simplify the operands before analyzing them. 6487 (void)SimplifyICmpOperands(Cond, LHS, RHS); 6488 6489 // If we have a comparison of a chrec against a constant, try to use value 6490 // ranges to answer this query. 6491 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 6492 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 6493 if (AddRec->getLoop() == L) { 6494 // Form the constant range. 
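// (Editorial example, hypothetical values: for Cond == ICMP_ULT and a
// constant RHS of 10, makeExactICmpRegion yields the unsigned range
// [0, 10); getNumIterationsInRange then, roughly, counts the leading
// iterations for which the AddRec's value stays inside that range.)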
6495 ConstantRange CompRange = 6496 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 6497 6498 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 6499 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 6500 } 6501 6502 switch (Cond) { 6503 case ICmpInst::ICMP_NE: { // while (X != Y) 6504 // Convert to: while (X-Y != 0) 6505 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 6506 AllowPredicates); 6507 if (EL.hasAnyInfo()) return EL; 6508 break; 6509 } 6510 case ICmpInst::ICMP_EQ: { // while (X == Y) 6511 // Convert to: while (X-Y == 0) 6512 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 6513 if (EL.hasAnyInfo()) return EL; 6514 break; 6515 } 6516 case ICmpInst::ICMP_SLT: 6517 case ICmpInst::ICMP_ULT: { // while (X < Y) 6518 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 6519 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 6520 AllowPredicates); 6521 if (EL.hasAnyInfo()) return EL; 6522 break; 6523 } 6524 case ICmpInst::ICMP_SGT: 6525 case ICmpInst::ICMP_UGT: { // while (X > Y) 6526 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 6527 ExitLimit EL = 6528 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 6529 AllowPredicates); 6530 if (EL.hasAnyInfo()) return EL; 6531 break; 6532 } 6533 default: 6534 break; 6535 } 6536 6537 auto *ExhaustiveCount = 6538 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6539 6540 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 6541 return ExhaustiveCount; 6542 6543 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 6544 ExitCond->getOperand(1), L, Cond); 6545 } 6546 6547 ScalarEvolution::ExitLimit 6548 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 6549 SwitchInst *Switch, 6550 BasicBlock *ExitingBlock, 6551 bool ControlsExit) { 6552 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 6553 6554 // Give up if the exit is the default dest of a switch. 6555 if (Switch->getDefaultDest() == ExitingBlock) 6556 return getCouldNotCompute(); 6557 6558 assert(L->contains(Switch->getDefaultDest()) && 6559 "Default case must not exit the loop!"); 6560 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 6561 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 6562 6563 // while (X != Y) --> while (X-Y != 0) 6564 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 6565 if (EL.hasAnyInfo()) 6566 return EL; 6567 6568 return getCouldNotCompute(); 6569 } 6570 6571 static ConstantInt * 6572 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 6573 ScalarEvolution &SE) { 6574 const SCEV *InVal = SE.getConstant(C); 6575 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 6576 assert(isa<SCEVConstant>(Val) && 6577 "Evaluation of SCEV at constant didn't fold correctly?"); 6578 return cast<SCEVConstant>(Val)->getValue(); 6579 } 6580 6581 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 6582 /// compute the backedge execution count. 6583 ScalarEvolution::ExitLimit 6584 ScalarEvolution::computeLoadConstantCompareExitLimit( 6585 LoadInst *LI, 6586 Constant *RHS, 6587 const Loop *L, 6588 ICmpInst::Predicate predicate) { 6589 6590 if (LI->isVolatile()) return getCouldNotCompute(); 6591 6592 // Check to see if the loaded pointer is a getelementptr of a global. 6593 // TODO: Use SCEV instead of manually grubbing with GEPs. 
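// (Editorial sketch of the recognized shape, hypothetical IR names:
//   %p = getelementptr [10 x i32], [10 x i32]* @G, i32 0, i32 %i
//   %v = load i32, i32* %p
//   %c = icmp eq i32 %v, 0
// where @G is a constant global with a definitive initializer and %i is
// an affine AddRec; the iterations are then evaluated one at a time, up
// to MaxBruteForceIterations, looking for the first one on which the
// compare evaluates to false.)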
6594 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 6595 if (!GEP) return getCouldNotCompute(); 6596 6597 // Make sure that it is really a constant global we are gepping, with an 6598 // initializer, and make sure the first IDX is really 0. 6599 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 6600 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 6601 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 6602 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 6603 return getCouldNotCompute(); 6604 6605 // Okay, we allow one non-constant index into the GEP instruction. 6606 Value *VarIdx = nullptr; 6607 std::vector<Constant*> Indexes; 6608 unsigned VarIdxNum = 0; 6609 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 6610 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 6611 Indexes.push_back(CI); 6612 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 6613 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant indices. 6614 VarIdx = GEP->getOperand(i); 6615 VarIdxNum = i-2; 6616 Indexes.push_back(nullptr); 6617 } 6618 6619 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 6620 if (!VarIdx) 6621 return getCouldNotCompute(); 6622 6623 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 6624 // Check to see if X is a loop variant variable value now. 6625 const SCEV *Idx = getSCEV(VarIdx); 6626 Idx = getSCEVAtScope(Idx, L); 6627 6628 // We can only recognize very limited forms of loop index expressions, in 6629 // particular, only affine AddRec's like {C1,+,C2}. 6630 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 6631 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 6632 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 6633 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 6634 return getCouldNotCompute(); 6635 6636 unsigned MaxSteps = MaxBruteForceIterations; 6637 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 6638 ConstantInt *ItCst = ConstantInt::get( 6639 cast<IntegerType>(IdxExpr->getType()), IterationNum); 6640 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 6641 6642 // Form the GEP offset. 6643 Indexes[VarIdxNum] = Val; 6644 6645 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 6646 Indexes); 6647 if (!Result) break; // Cannot compute! 6648 6649 // Evaluate the condition for this iteration. 6650 Result = ConstantExpr::getICmp(predicate, Result, RHS); 6651 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 6652 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 6653 ++NumArrayLenItCounts; 6654 return getConstant(ItCst); // Found terminating iteration! 6655 } 6656 } 6657 return getCouldNotCompute(); 6658 } 6659 6660 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 6661 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 6662 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 6663 if (!RHS) 6664 return getCouldNotCompute(); 6665 6666 const BasicBlock *Latch = L->getLoopLatch(); 6667 if (!Latch) 6668 return getCouldNotCompute(); 6669 6670 const BasicBlock *Predecessor = L->getLoopPredecessor(); 6671 if (!Predecessor) 6672 return getCouldNotCompute(); 6673 6674 // Return true if V is of the form "LHS `shift_op` <positive constant>". 6675 // Return LHS in OutLHS and shift_op in OutOpCode.
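// (Editorial example, hypothetical values: for V defined by
// "%s = lshr i32 %a, 3", OutLHS is set to %a, OutOpCode to
// Instruction::LShr, and the match succeeds; a shift amount of zero is
// rejected because it is not strictly positive.)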
6676 auto MatchPositiveShift = 6677 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 6678 6679 using namespace PatternMatch; 6680 6681 ConstantInt *ShiftAmt; 6682 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6683 OutOpCode = Instruction::LShr; 6684 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6685 OutOpCode = Instruction::AShr; 6686 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6687 OutOpCode = Instruction::Shl; 6688 else 6689 return false; 6690 6691 return ShiftAmt->getValue().isStrictlyPositive(); 6692 }; 6693 6694 // Recognize a "shift recurrence", either %iv or %iv.shifted in 6695 // 6696 // loop: 6697 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 6698 // %iv.shifted = lshr i32 %iv, <positive constant> 6699 // 6700 // Return true on a successful match. Return the corresponding PHI node (%iv 6701 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 6702 auto MatchShiftRecurrence = 6703 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 6704 Optional<Instruction::BinaryOps> PostShiftOpCode; 6705 6706 { 6707 Instruction::BinaryOps OpC; 6708 Value *V; 6709 6710 // If we encounter a shift instruction, "peel off" the shift operation, 6711 // and remember that we did so. Later when we inspect %iv's backedge 6712 // value, we will make sure that the backedge value uses the same 6713 // operation. 6714 // 6715 // Note: the peeled shift operation does not have to be the same 6716 // instruction as the one feeding into the PHI's backedge value. We only 6717 // really care about it being the same *kind* of shift instruction -- 6718 // that's all that is required for our later inferences to hold. 6719 if (MatchPositiveShift(LHS, V, OpC)) { 6720 PostShiftOpCode = OpC; 6721 LHS = V; 6722 } 6723 } 6724 6725 PNOut = dyn_cast<PHINode>(LHS); 6726 if (!PNOut || PNOut->getParent() != L->getHeader()) 6727 return false; 6728 6729 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 6730 Value *OpLHS; 6731 6732 return 6733 // The backedge value for the PHI node must be a shift by a positive 6734 // amount 6735 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 6736 6737 // of the PHI node itself 6738 OpLHS == PNOut && 6739 6740 // and the kind of shift should match the kind of shift we peeled 6741 // off, if any. 6742 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 6743 }; 6744 6745 PHINode *PN; 6746 Instruction::BinaryOps OpCode; 6747 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 6748 return getCouldNotCompute(); 6749 6750 const DataLayout &DL = getDataLayout(); 6751 6752 // The key rationale for this optimization is that for some kinds of shift 6753 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 6754 // within a finite number of iterations. If the condition guarding the 6755 // backedge (in the sense that the backedge is taken if the condition is true) 6756 // is false for the value the shift recurrence stabilizes to, then we know 6757 // that the backedge is taken only a finite number of times. 6758 6759 ConstantInt *StableValue = nullptr; 6760 switch (OpCode) { 6761 default: 6762 llvm_unreachable("Impossible case!"); 6763 6764 case Instruction::AShr: { 6765 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most 6766 // bitwidth(K) iterations.
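// (Editorial example, hypothetical values: {-8,ashr,1} produces
// -8, -4, -2, -1, -1, ... and stabilizes to -1, while the non-negative
// start {8,ashr,1} produces 8, 4, 2, 1, 0, 0, ... and stabilizes to 0 --
// hence signum(K).)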
6767 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 6768 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 6769 Predecessor->getTerminator(), &DT); 6770 auto *Ty = cast<IntegerType>(RHS->getType()); 6771 if (Known.isNonNegative()) 6772 StableValue = ConstantInt::get(Ty, 0); 6773 else if (Known.isNegative()) 6774 StableValue = ConstantInt::get(Ty, -1, true); 6775 else 6776 return getCouldNotCompute(); 6777 6778 break; 6779 } 6780 case Instruction::LShr: 6781 case Instruction::Shl: 6782 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 6783 // stabilize to 0 in at most bitwidth(K) iterations. 6784 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 6785 break; 6786 } 6787 6788 auto *Result = 6789 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 6790 assert(Result->getType()->isIntegerTy(1) && 6791 "Otherwise cannot be an operand to a branch instruction"); 6792 6793 if (Result->isZeroValue()) { 6794 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 6795 const SCEV *UpperBound = 6796 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 6797 return ExitLimit(getCouldNotCompute(), UpperBound, false); 6798 } 6799 6800 return getCouldNotCompute(); 6801 } 6802 6803 /// Return true if we can constant fold an instruction of the specified type, 6804 /// assuming that all operands were constants. 6805 static bool CanConstantFold(const Instruction *I) { 6806 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 6807 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 6808 isa<LoadInst>(I)) 6809 return true; 6810 6811 if (const CallInst *CI = dyn_cast<CallInst>(I)) 6812 if (const Function *F = CI->getCalledFunction()) 6813 return canConstantFoldCallTo(CI, F); 6814 return false; 6815 } 6816 6817 /// Determine whether this instruction can constant evolve within this loop 6818 /// assuming its operands can all constant evolve. 6819 static bool canConstantEvolve(Instruction *I, const Loop *L) { 6820 // An instruction outside of the loop can't be derived from a loop PHI. 6821 if (!L->contains(I)) return false; 6822 6823 if (isa<PHINode>(I)) { 6824 // We don't currently keep track of the control flow needed to evaluate 6825 // PHIs, so we cannot handle PHIs inside of loops. 6826 return L->getHeader() == I->getParent(); 6827 } 6828 6829 // If we won't be able to constant fold this expression even if the operands 6830 // are constants, bail early. 6831 return CanConstantFold(I); 6832 } 6833 6834 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 6835 /// recursing through each instruction operand until reaching a loop header phi. 6836 static PHINode * 6837 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 6838 DenseMap<Instruction *, PHINode *> &PHIMap, 6839 unsigned Depth) { 6840 if (Depth > MaxConstantEvolvingDepth) 6841 return nullptr; 6842 6843 // Otherwise, we can evaluate this instruction if all of its operands are 6844 // constant or derived from a PHI node themselves. 6845 PHINode *PHI = nullptr; 6846 for (Value *Op : UseInst->operands()) { 6847 if (isa<Constant>(Op)) continue; 6848 6849 Instruction *OpInst = dyn_cast<Instruction>(Op); 6850 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 6851 6852 PHINode *P = dyn_cast<PHINode>(OpInst); 6853 if (!P) 6854 // If this operand is already visited, reuse the prior result. 6855 // We may have P != PHI if this is the deepest point at which the 6856 // inconsistent paths meet. 
6857 P = PHIMap.lookup(OpInst); 6858 if (!P) { 6859 // Recurse and memoize the results, whether a phi is found or not. 6860 // This recursive call invalidates pointers into PHIMap. 6861 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 6862 PHIMap[OpInst] = P; 6863 } 6864 if (!P) 6865 return nullptr; // Not evolving from PHI 6866 if (PHI && PHI != P) 6867 return nullptr; // Evolving from multiple different PHIs. 6868 PHI = P; 6869 } 6870 // This is an expression evolving from a constant PHI! 6871 return PHI; 6872 } 6873 6874 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 6875 /// in the loop that V is derived from. We allow arbitrary operations along the 6876 /// way, but the operands of an operation must either be constants or a value 6877 /// derived from a constant PHI. If this expression does not fit with these 6878 /// constraints, return null. 6879 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 6880 Instruction *I = dyn_cast<Instruction>(V); 6881 if (!I || !canConstantEvolve(I, L)) return nullptr; 6882 6883 if (PHINode *PN = dyn_cast<PHINode>(I)) 6884 return PN; 6885 6886 // Record non-constant instructions contained by the loop. 6887 DenseMap<Instruction *, PHINode *> PHIMap; 6888 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 6889 } 6890 6891 /// EvaluateExpression - Given an expression that passes the 6892 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI 6893 /// nodes in the loop have the values given in the Vals map. If we can't fold 6894 /// this expression for some reason, return null. 6895 static Constant *EvaluateExpression(Value *V, const Loop *L, 6896 DenseMap<Instruction *, Constant *> &Vals, 6897 const DataLayout &DL, 6898 const TargetLibraryInfo *TLI) { 6899 // Convenient constant check, but redundant for recursive calls. 6900 if (Constant *C = dyn_cast<Constant>(V)) return C; 6901 Instruction *I = dyn_cast<Instruction>(V); 6902 if (!I) return nullptr; 6903 6904 if (Constant *C = Vals.lookup(I)) return C; 6905 6906 // An instruction inside the loop depends on a value outside the loop that we 6907 // weren't given a mapping for, or a value such as a call inside the loop. 6908 if (!canConstantEvolve(I, L)) return nullptr; 6909 6910 // An unmapped PHI can be due to a branch or another loop inside this loop, 6911 // or due to this not being the initial iteration through a loop where we 6912 // couldn't compute the evolution of this particular PHI last time. 6913 if (isa<PHINode>(I)) return nullptr; 6914 6915 std::vector<Constant*> Operands(I->getNumOperands()); 6916 6917 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 6918 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 6919 if (!Operand) { 6920 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 6921 if (!Operands[i]) return nullptr; 6922 continue; 6923 } 6924 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 6925 Vals[Operand] = C; 6926 if (!C) return nullptr; 6927 Operands[i] = C; 6928 } 6929 6930 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 6931 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 6932 Operands[1], DL, TLI); 6933 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 6934 if (!LI->isVolatile()) 6935 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 6936 } 6937 return ConstantFoldInstOperands(I, Operands, DL, TLI); 6938 } 6939 6940 6941 // If every incoming value to PN except the one for BB is a specific Constant, 6942 // return that, else return nullptr.
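// (Editorial example, hypothetical IR: given
//   %p = phi i32 [ 5, %preheader ], [ %next, %latch ]
// getOtherIncomingValue(%p, %latch) returns the ConstantInt 5; it returns
// nullptr if any non-BB incoming value is non-constant or if two such
// values disagree.)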
6943 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 6944 Constant *IncomingVal = nullptr; 6945 6946 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 6947 if (PN->getIncomingBlock(i) == BB) 6948 continue; 6949 6950 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 6951 if (!CurrentVal) 6952 return nullptr; 6953 6954 if (IncomingVal != CurrentVal) { 6955 if (IncomingVal) 6956 return nullptr; 6957 IncomingVal = CurrentVal; 6958 } 6959 } 6960 6961 return IncomingVal; 6962 } 6963 6964 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 6965 /// in the header of its containing loop, we know the loop executes a 6966 /// constant number of times, and the PHI node is just a recurrence 6967 /// involving constants, fold it. 6968 Constant * 6969 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 6970 const APInt &BEs, 6971 const Loop *L) { 6972 auto I = ConstantEvolutionLoopExitValue.find(PN); 6973 if (I != ConstantEvolutionLoopExitValue.end()) 6974 return I->second; 6975 6976 if (BEs.ugt(MaxBruteForceIterations)) 6977 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 6978 6979 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 6980 6981 DenseMap<Instruction *, Constant *> CurrentIterVals; 6982 BasicBlock *Header = L->getHeader(); 6983 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6984 6985 BasicBlock *Latch = L->getLoopLatch(); 6986 if (!Latch) 6987 return nullptr; 6988 6989 for (auto &I : *Header) { 6990 PHINode *PHI = dyn_cast<PHINode>(&I); 6991 if (!PHI) break; 6992 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6993 if (!StartCST) continue; 6994 CurrentIterVals[PHI] = StartCST; 6995 } 6996 if (!CurrentIterVals.count(PN)) 6997 return RetVal = nullptr; 6998 6999 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7000 7001 // Execute the loop symbolically to determine the exit value. 7002 if (BEs.getActiveBits() >= 32) 7003 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! 7004 7005 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7006 unsigned IterationNum = 0; 7007 const DataLayout &DL = getDataLayout(); 7008 for (; ; ++IterationNum) { 7009 if (IterationNum == NumIterations) 7010 return RetVal = CurrentIterVals[PN]; // Got exit value! 7011 7012 // Compute the value of the PHIs for the next iteration. 7013 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7014 DenseMap<Instruction *, Constant *> NextIterVals; 7015 Constant *NextPHI = 7016 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7017 if (!NextPHI) 7018 return nullptr; // Couldn't evaluate! 7019 NextIterVals[PN] = NextPHI; 7020 7021 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7022 7023 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7024 // cease to be able to evaluate one of them or if they stop evolving, 7025 // because that doesn't necessarily prevent us from computing PN. 7026 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7027 for (const auto &I : CurrentIterVals) { 7028 PHINode *PHI = dyn_cast<PHINode>(I.first); 7029 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7030 PHIsToCompute.emplace_back(PHI, I.second); 7031 } 7032 // We use two distinct loops because EvaluateExpression may invalidate any 7033 // iterators into CurrentIterVals. 
7034 for (const auto &I : PHIsToCompute) { 7035 PHINode *PHI = I.first; 7036 Constant *&NextPHI = NextIterVals[PHI]; 7037 if (!NextPHI) { // Not already computed. 7038 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7039 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7040 } 7041 if (NextPHI != I.second) 7042 StoppedEvolving = false; 7043 } 7044 7045 // If all entries in CurrentIterVals == NextIterVals then we can stop 7046 // iterating, the loop can't continue to change. 7047 if (StoppedEvolving) 7048 return RetVal = CurrentIterVals[PN]; 7049 7050 CurrentIterVals.swap(NextIterVals); 7051 } 7052 } 7053 7054 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 7055 Value *Cond, 7056 bool ExitWhen) { 7057 PHINode *PN = getConstantEvolvingPHI(Cond, L); 7058 if (!PN) return getCouldNotCompute(); 7059 7060 // If the loop is canonicalized, the PHI will have exactly two entries. 7061 // That's the only form we support here. 7062 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 7063 7064 DenseMap<Instruction *, Constant *> CurrentIterVals; 7065 BasicBlock *Header = L->getHeader(); 7066 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7067 7068 BasicBlock *Latch = L->getLoopLatch(); 7069 assert(Latch && "Should follow from NumIncomingValues == 2!"); 7070 7071 for (auto &I : *Header) { 7072 PHINode *PHI = dyn_cast<PHINode>(&I); 7073 if (!PHI) 7074 break; 7075 auto *StartCST = getOtherIncomingValue(PHI, Latch); 7076 if (!StartCST) continue; 7077 CurrentIterVals[PHI] = StartCST; 7078 } 7079 if (!CurrentIterVals.count(PN)) 7080 return getCouldNotCompute(); 7081 7082 // Okay, we found a PHI node that defines the trip count of this loop. Execute 7083 // the loop symbolically to determine when the condition gets a value of 7084 // "ExitWhen". 7085 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 7086 const DataLayout &DL = getDataLayout(); 7087 for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) { 7088 auto *CondVal = dyn_cast_or_null<ConstantInt>( 7089 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 7090 7091 // Couldn't symbolically evaluate. 7092 if (!CondVal) return getCouldNotCompute(); 7093 7094 if (CondVal->getValue() == uint64_t(ExitWhen)) { 7095 ++NumBruteForceTripCountsComputed; 7096 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 7097 } 7098 7099 // Update all the PHI nodes for the next iteration. 7100 DenseMap<Instruction *, Constant *> NextIterVals; 7101 7102 // Create a list of which PHIs we need to compute. We want to do this before 7103 // calling EvaluateExpression on them because that may invalidate iterators 7104 // into CurrentIterVals. 7105 SmallVector<PHINode *, 8> PHIsToCompute; 7106 for (const auto &I : CurrentIterVals) { 7107 PHINode *PHI = dyn_cast<PHINode>(I.first); 7108 if (!PHI || PHI->getParent() != Header) continue; 7109 PHIsToCompute.push_back(PHI); 7110 } 7111 for (PHINode *PHI : PHIsToCompute) { 7112 Constant *&NextPHI = NextIterVals[PHI]; 7113 if (NextPHI) continue; // Already computed! 7114 7115 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7116 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7117 } 7118 CurrentIterVals.swap(NextIterVals); 7119 } 7120 7121 // Too many iterations were needed to evaluate.
7122 return getCouldNotCompute(); 7123 } 7124 7125 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7126 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7127 ValuesAtScopes[V]; 7128 // Check to see if we've folded this expression at this loop before. 7129 for (auto &LS : Values) 7130 if (LS.first == L) 7131 return LS.second ? LS.second : V; 7132 7133 Values.emplace_back(L, nullptr); 7134 7135 // Otherwise compute it. 7136 const SCEV *C = computeSCEVAtScope(V, L); 7137 for (auto &LS : reverse(ValuesAtScopes[V])) 7138 if (LS.first == L) { 7139 LS.second = C; 7140 break; 7141 } 7142 return C; 7143 } 7144 7145 /// This builds up a Constant using the ConstantExpr interface. That way, we 7146 /// will return Constants for objects which aren't represented by a 7147 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7148 /// Returns NULL if the SCEV isn't representable as a Constant. 7149 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7150 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7151 case scCouldNotCompute: 7152 case scAddRecExpr: 7153 break; 7154 case scConstant: 7155 return cast<SCEVConstant>(V)->getValue(); 7156 case scUnknown: 7157 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7158 case scSignExtend: { 7159 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7160 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7161 return ConstantExpr::getSExt(CastOp, SS->getType()); 7162 break; 7163 } 7164 case scZeroExtend: { 7165 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7166 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7167 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7168 break; 7169 } 7170 case scTruncate: { 7171 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7172 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7173 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7174 break; 7175 } 7176 case scAddExpr: { 7177 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7178 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7179 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7180 unsigned AS = PTy->getAddressSpace(); 7181 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7182 C = ConstantExpr::getBitCast(C, DestPtrTy); 7183 } 7184 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7185 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7186 if (!C2) return nullptr; 7187 7188 // First pointer! 7189 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7190 unsigned AS = C2->getType()->getPointerAddressSpace(); 7191 std::swap(C, C2); 7192 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7193 // The offsets have been converted to bytes. We can add bytes to an 7194 // i8* by GEP with the byte count in the first index. 7195 C = ConstantExpr::getBitCast(C, DestPtrTy); 7196 } 7197 7198 // Don't bother trying to sum two pointers. We probably can't 7199 // statically compute a load that results from it anyway. 
7200 if (C2->getType()->isPointerTy()) 7201 return nullptr; 7202 7203 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7204 if (PTy->getElementType()->isStructTy()) 7205 C2 = ConstantExpr::getIntegerCast( 7206 C2, Type::getInt32Ty(C->getContext()), true); 7207 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 7208 } else 7209 C = ConstantExpr::getAdd(C, C2); 7210 } 7211 return C; 7212 } 7213 break; 7214 } 7215 case scMulExpr: { 7216 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 7217 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 7218 // Don't bother with pointers at all. 7219 if (C->getType()->isPointerTy()) return nullptr; 7220 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 7221 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 7222 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 7223 C = ConstantExpr::getMul(C, C2); 7224 } 7225 return C; 7226 } 7227 break; 7228 } 7229 case scUDivExpr: { 7230 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 7231 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 7232 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 7233 if (LHS->getType() == RHS->getType()) 7234 return ConstantExpr::getUDiv(LHS, RHS); 7235 break; 7236 } 7237 case scSMaxExpr: 7238 case scUMaxExpr: 7239 break; // TODO: smax, umax. 7240 } 7241 return nullptr; 7242 } 7243 7244 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 7245 if (isa<SCEVConstant>(V)) return V; 7246 7247 // If this instruction is evolved from a constant-evolving PHI, compute the 7248 // exit value from the loop without using SCEVs. 7249 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 7250 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 7251 const Loop *LI = this->LI[I->getParent()]; 7252 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 7253 if (PHINode *PN = dyn_cast<PHINode>(I)) 7254 if (PN->getParent() == LI->getHeader()) { 7255 // Okay, there is no closed form solution for the PHI node. Check 7256 // to see if the loop that contains it has a known backedge-taken 7257 // count. If so, we may be able to force computation of the exit 7258 // value. 7259 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 7260 if (const SCEVConstant *BTCC = 7261 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 7262 // Okay, we know how many times the containing loop executes. If 7263 // this is a constant evolving PHI node, get the final value at 7264 // the specified iteration number. 7265 Constant *RV = 7266 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 7267 if (RV) return getSCEV(RV); 7268 } 7269 } 7270 7271 // Okay, this is an expression that we cannot symbolically evaluate 7272 // into a SCEV. Check to see if it's possible to symbolically evaluate 7273 // the arguments into constants, and if so, try to constant propagate the 7274 // result. This is particularly useful for computing loop exit values. 7275 if (CanConstantFold(I)) { 7276 SmallVector<Constant *, 4> Operands; 7277 bool MadeImprovement = false; 7278 for (Value *Op : I->operands()) { 7279 if (Constant *C = dyn_cast<Constant>(Op)) { 7280 Operands.push_back(C); 7281 continue; 7282 } 7283 7284 // If any of the operands is non-constant and if they are 7285 // non-integer and non-pointer, don't even try to analyze them 7286 // with scev techniques. 
7287 if (!isSCEVable(Op->getType())) 7288 return V; 7289 7290 const SCEV *OrigV = getSCEV(Op); 7291 const SCEV *OpV = getSCEVAtScope(OrigV, L); 7292 MadeImprovement |= OrigV != OpV; 7293 7294 Constant *C = BuildConstantFromSCEV(OpV); 7295 if (!C) return V; 7296 if (C->getType() != Op->getType()) 7297 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 7298 Op->getType(), 7299 false), 7300 C, Op->getType()); 7301 Operands.push_back(C); 7302 } 7303 7304 // Check to see if getSCEVAtScope actually made an improvement. 7305 if (MadeImprovement) { 7306 Constant *C = nullptr; 7307 const DataLayout &DL = getDataLayout(); 7308 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 7309 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7310 Operands[1], DL, &TLI); 7311 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 7312 if (!LI->isVolatile()) 7313 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7314 } else 7315 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 7316 if (!C) return V; 7317 return getSCEV(C); 7318 } 7319 } 7320 } 7321 7322 // This is some other type of SCEVUnknown, just return it. 7323 return V; 7324 } 7325 7326 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 7327 // Avoid performing the look-up in the common case where the specified 7328 // expression has no loop-variant portions. 7329 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 7330 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7331 if (OpAtScope != Comm->getOperand(i)) { 7332 // Okay, at least one of these operands is loop variant but might be 7333 // foldable. Build a new instance of the folded commutative expression. 7334 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 7335 Comm->op_begin()+i); 7336 NewOps.push_back(OpAtScope); 7337 7338 for (++i; i != e; ++i) { 7339 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7340 NewOps.push_back(OpAtScope); 7341 } 7342 if (isa<SCEVAddExpr>(Comm)) 7343 return getAddExpr(NewOps); 7344 if (isa<SCEVMulExpr>(Comm)) 7345 return getMulExpr(NewOps); 7346 if (isa<SCEVSMaxExpr>(Comm)) 7347 return getSMaxExpr(NewOps); 7348 if (isa<SCEVUMaxExpr>(Comm)) 7349 return getUMaxExpr(NewOps); 7350 llvm_unreachable("Unknown commutative SCEV type!"); 7351 } 7352 } 7353 // If we got here, all operands are loop invariant. 7354 return Comm; 7355 } 7356 7357 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 7358 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 7359 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 7360 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 7361 return Div; // must be loop invariant 7362 return getUDivExpr(LHS, RHS); 7363 } 7364 7365 // If this is a loop recurrence for a loop that does not contain L, then we 7366 // are dealing with the final value computed by the loop. 7367 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 7368 // First, attempt to evaluate each operand. 7369 // Avoid performing the look-up in the common case where the specified 7370 // expression has no loop-variant portions. 7371 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 7372 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 7373 if (OpAtScope == AddRec->getOperand(i)) 7374 continue; 7375 7376 // Okay, at least one of these operands is loop variant but might be 7377 // foldable. Build a new instance of the folded commutative expression. 
7378 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 7379 AddRec->op_begin()+i); 7380 NewOps.push_back(OpAtScope); 7381 for (++i; i != e; ++i) 7382 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 7383 7384 const SCEV *FoldedRec = 7385 getAddRecExpr(NewOps, AddRec->getLoop(), 7386 AddRec->getNoWrapFlags(SCEV::FlagNW)); 7387 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 7388 // The addrec may be folded to a nonrecurrence, for example, if the 7389 // induction variable is multiplied by zero after constant folding. Go 7390 // ahead and return the folded value. 7391 if (!AddRec) 7392 return FoldedRec; 7393 break; 7394 } 7395 7396 // If the scope is outside the addrec's loop, evaluate it by using the 7397 // loop exit value of the addrec. 7398 if (!AddRec->getLoop()->contains(L)) { 7399 // To evaluate this recurrence, we need to know how many times the AddRec 7400 // loop iterates. Compute this now. 7401 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 7402 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 7403 7404 // Then, evaluate the AddRec. 7405 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 7406 } 7407 7408 return AddRec; 7409 } 7410 7411 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 7412 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7413 if (Op == Cast->getOperand()) 7414 return Cast; // must be loop invariant 7415 return getZeroExtendExpr(Op, Cast->getType()); 7416 } 7417 7418 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 7419 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7420 if (Op == Cast->getOperand()) 7421 return Cast; // must be loop invariant 7422 return getSignExtendExpr(Op, Cast->getType()); 7423 } 7424 7425 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 7426 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7427 if (Op == Cast->getOperand()) 7428 return Cast; // must be loop invariant 7429 return getTruncateExpr(Op, Cast->getType()); 7430 } 7431 7432 llvm_unreachable("Unknown SCEV type!"); 7433 } 7434 7435 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 7436 return getSCEVAtScope(getSCEV(V), L); 7437 } 7438 7439 /// Finds the minimum unsigned root of the following equation: 7440 /// 7441 /// A * X = B (mod N) 7442 /// 7443 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 7444 /// A and B isn't important. 7445 /// 7446 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 7447 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 7448 ScalarEvolution &SE) { 7449 uint32_t BW = A.getBitWidth(); 7450 assert(BW == SE.getTypeSizeInBits(B->getType())); 7451 assert(A != 0 && "A must be non-zero."); 7452 7453 // 1. D = gcd(A, N) 7454 // 7455 // The gcd of A and N may have only one prime factor: 2. The number of 7456 // trailing zeros in A is its multiplicity 7457 uint32_t Mult2 = A.countTrailingZeros(); 7458 // D = 2^Mult2 7459 7460 // 2. Check if B is divisible by D. 7461 // 7462 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 7463 // is not less than multiplicity of this prime factor for D. 7464 if (SE.GetMinTrailingZeros(B) < Mult2) 7465 return SE.getCouldNotCompute(); 7466 7467 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 7468 // modulo (N / D). 
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general.  The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //   I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //   (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}.  This returns either the two roots (which might be the same)
/// or None if the roots cannot be computed.
///
static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.

  // The A coefficient is N/2.
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2.
  APInt B = M;
  B -= A;  // A is the same as N/2.

  // The C coefficient is L.
  const APInt &C = L;

  // Compute the B^2-4ac term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4ac).  This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test.  The exit
  // condition is now expressed as a single expression, V = x-y.  So the exit
  // test is effectively V != 0.  We know and take advantage of the fact that
  // this expression is only ever used in a compare-against-zero context.
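  //
  // Illustrative example (not part of the original comments): for a loop
  //
  //   for (i = 0; i != n; i += 2) { ... }
  //
  // the exit expression is V = {-n,+,2}, and the affine case below hands
  // SolveLinEquationWithOverflow the equation 2 * X = n (mod 2^BW).  If n is
  // known even, say n == 10, the minimum root X == 5 is the backedge-taken
  // count; if n is odd the IV steps over n without ever equaling it, the
  // equation has no solution, and we return CouldNotCompute.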
7560 7561 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 7562 // If the value is a constant 7563 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7564 // If the value is already zero, the branch will execute zero times. 7565 if (C->getValue()->isZero()) return C; 7566 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7567 } 7568 7569 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 7570 if (!AddRec && AllowPredicates) 7571 // Try to make this an AddRec using runtime tests, in the first X 7572 // iterations of this loop, where X is the SCEV expression found by the 7573 // algorithm below. 7574 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 7575 7576 if (!AddRec || AddRec->getLoop() != L) 7577 return getCouldNotCompute(); 7578 7579 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 7580 // the quadratic equation to solve it. 7581 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 7582 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) { 7583 const SCEVConstant *R1 = Roots->first; 7584 const SCEVConstant *R2 = Roots->second; 7585 // Pick the smallest positive root value. 7586 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 7587 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 7588 if (!CB->getZExtValue()) 7589 std::swap(R1, R2); // R1 is the minimum root now. 7590 7591 // We can only use this value if the chrec ends up with an exact zero 7592 // value at this index. When solving for "X*X != 5", for example, we 7593 // should not accept a root of 2. 7594 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 7595 if (Val->isZero()) 7596 // We found a quadratic root! 7597 return ExitLimit(R1, R1, false, Predicates); 7598 } 7599 } 7600 return getCouldNotCompute(); 7601 } 7602 7603 // Otherwise we can only handle this if it is affine. 7604 if (!AddRec->isAffine()) 7605 return getCouldNotCompute(); 7606 7607 // If this is an affine expression, the execution count of this branch is 7608 // the minimum unsigned root of the following equation: 7609 // 7610 // Start + Step*N = 0 (mod 2^BW) 7611 // 7612 // equivalent to: 7613 // 7614 // Step*N = -Start (mod 2^BW) 7615 // 7616 // where BW is the common bit width of Start and Step. 7617 7618 // Get the initial value for the loop. 7619 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 7620 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 7621 7622 // For now we handle only constant steps. 7623 // 7624 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 7625 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 7626 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 7627 // We have not yet seen any such cases. 7628 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 7629 if (!StepC || StepC->getValue()->isZero()) 7630 return getCouldNotCompute(); 7631 7632 // For positive steps (counting up until unsigned overflow): 7633 // N = -Start/Step (as unsigned) 7634 // For negative steps (counting down to zero): 7635 // N = Start/-Step 7636 // First compute the unsigned distance from zero in the direction of Step. 7637 bool CountDown = StepC->getAPInt().isNegative(); 7638 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 7639 7640 // Handle unitary steps, which cannot wraparound. 
7641 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 7642 // N = Distance (as unsigned) 7643 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 7644 APInt MaxBECount = getUnsignedRangeMax(Distance); 7645 7646 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 7647 // we end up with a loop whose backedge-taken count is n - 1. Detect this 7648 // case, and see if we can improve the bound. 7649 // 7650 // Explicitly handling this here is necessary because getUnsignedRange 7651 // isn't context-sensitive; it doesn't know that we only care about the 7652 // range inside the loop. 7653 const SCEV *Zero = getZero(Distance->getType()); 7654 const SCEV *One = getOne(Distance->getType()); 7655 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 7656 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 7657 // If Distance + 1 doesn't overflow, we can compute the maximum distance 7658 // as "unsigned_max(Distance + 1) - 1". 7659 ConstantRange CR = getUnsignedRange(DistancePlusOne); 7660 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 7661 } 7662 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 7663 } 7664 7665 // If the condition controls loop exit (the loop exits only if the expression 7666 // is true) and the addition is no-wrap we can use unsigned divide to 7667 // compute the backedge count. In this case, the step may not divide the 7668 // distance, but we don't care because if the condition is "missed" the loop 7669 // will have undefined behavior due to wrapping. 7670 if (ControlsExit && AddRec->hasNoSelfWrap() && 7671 loopHasNoAbnormalExits(AddRec->getLoop())) { 7672 const SCEV *Exact = 7673 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 7674 const SCEV *Max = 7675 Exact == getCouldNotCompute() 7676 ? Exact 7677 : getConstant(getUnsignedRangeMax(Exact)); 7678 return ExitLimit(Exact, Max, false, Predicates); 7679 } 7680 7681 // Solve the general equation. 7682 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 7683 getNegativeSCEV(Start), *this); 7684 const SCEV *M = E == getCouldNotCompute() 7685 ? E 7686 : getConstant(getUnsignedRangeMax(E)); 7687 return ExitLimit(E, M, false, Predicates); 7688 } 7689 7690 ScalarEvolution::ExitLimit 7691 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 7692 // Loops that look like: while (X == 0) are very strange indeed. We don't 7693 // handle them yet except for the trivial case. This could be expanded in the 7694 // future as needed. 7695 7696 // If the value is a constant, check to see if it is known to be non-zero 7697 // already. If so, the backedge will execute zero times. 7698 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7699 if (!C->getValue()->isZero()) 7700 return getZero(C->getType()); 7701 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7702 } 7703 7704 // We could implement others, but I really doubt anyone writes loops like 7705 // this, and if they did, they would already be constant folded. 7706 return getCouldNotCompute(); 7707 } 7708 7709 std::pair<BasicBlock *, BasicBlock *> 7710 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 7711 // If the block has a unique predecessor, then there is no path from the 7712 // predecessor to the block that does not go through the direct edge 7713 // from the predecessor to the block. 
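  // For instance (illustrative): if BB has the single predecessor P, every
  // path from P to BB must use the edge P->BB, so {P, BB} is returned below.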
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a
/// condition guarding a loop, it can be useful to be a little more general,
/// since a front-end may have replicated the controlling expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory, but they compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left.  Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
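  // For example (illustrative): "X u< 1" has the exact range [0, 1), a
  // single element, so it is rewritten as "X == 0" below; "X u>= 5" is
  // rewritten as "X u> 4".  Predicates whose exact range is full or empty
  // fold to trivially true or trivially false.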
7795 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 7796 const APInt &RA = RC->getAPInt(); 7797 7798 bool SimplifiedByConstantRange = false; 7799 7800 if (!ICmpInst::isEquality(Pred)) { 7801 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 7802 if (ExactCR.isFullSet()) 7803 goto trivially_true; 7804 else if (ExactCR.isEmptySet()) 7805 goto trivially_false; 7806 7807 APInt NewRHS; 7808 CmpInst::Predicate NewPred; 7809 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 7810 ICmpInst::isEquality(NewPred)) { 7811 // We were able to convert an inequality to an equality. 7812 Pred = NewPred; 7813 RHS = getConstant(NewRHS); 7814 Changed = SimplifiedByConstantRange = true; 7815 } 7816 } 7817 7818 if (!SimplifiedByConstantRange) { 7819 switch (Pred) { 7820 default: 7821 break; 7822 case ICmpInst::ICMP_EQ: 7823 case ICmpInst::ICMP_NE: 7824 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 7825 if (!RA) 7826 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 7827 if (const SCEVMulExpr *ME = 7828 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 7829 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 7830 ME->getOperand(0)->isAllOnesValue()) { 7831 RHS = AE->getOperand(1); 7832 LHS = ME->getOperand(1); 7833 Changed = true; 7834 } 7835 break; 7836 7837 7838 // The "Should have been caught earlier!" messages refer to the fact 7839 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 7840 // should have fired on the corresponding cases, and canonicalized the 7841 // check to trivially_true or trivially_false. 7842 7843 case ICmpInst::ICMP_UGE: 7844 assert(!RA.isMinValue() && "Should have been caught earlier!"); 7845 Pred = ICmpInst::ICMP_UGT; 7846 RHS = getConstant(RA - 1); 7847 Changed = true; 7848 break; 7849 case ICmpInst::ICMP_ULE: 7850 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 7851 Pred = ICmpInst::ICMP_ULT; 7852 RHS = getConstant(RA + 1); 7853 Changed = true; 7854 break; 7855 case ICmpInst::ICMP_SGE: 7856 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 7857 Pred = ICmpInst::ICMP_SGT; 7858 RHS = getConstant(RA - 1); 7859 Changed = true; 7860 break; 7861 case ICmpInst::ICMP_SLE: 7862 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 7863 Pred = ICmpInst::ICMP_SLT; 7864 RHS = getConstant(RA + 1); 7865 Changed = true; 7866 break; 7867 } 7868 } 7869 } 7870 7871 // Check for obvious equality. 7872 if (HasSameValue(LHS, RHS)) { 7873 if (ICmpInst::isTrueWhenEqual(Pred)) 7874 goto trivially_true; 7875 if (ICmpInst::isFalseWhenEqual(Pred)) 7876 goto trivially_false; 7877 } 7878 7879 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 7880 // adding or subtracting 1 from one of the operands. 
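  // For example (illustrative): "X s<= Y" becomes "X s< Y + 1" when Y is
  // known not to be SINT_MAX, or "(-1) + X s< Y" when X is known not to be
  // SINT_MIN, so later code only has to reason about strict predicates.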
7881 switch (Pred) { 7882 case ICmpInst::ICMP_SLE: 7883 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 7884 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7885 SCEV::FlagNSW); 7886 Pred = ICmpInst::ICMP_SLT; 7887 Changed = true; 7888 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 7889 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 7890 SCEV::FlagNSW); 7891 Pred = ICmpInst::ICMP_SLT; 7892 Changed = true; 7893 } 7894 break; 7895 case ICmpInst::ICMP_SGE: 7896 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 7897 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 7898 SCEV::FlagNSW); 7899 Pred = ICmpInst::ICMP_SGT; 7900 Changed = true; 7901 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 7902 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7903 SCEV::FlagNSW); 7904 Pred = ICmpInst::ICMP_SGT; 7905 Changed = true; 7906 } 7907 break; 7908 case ICmpInst::ICMP_ULE: 7909 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 7910 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7911 SCEV::FlagNUW); 7912 Pred = ICmpInst::ICMP_ULT; 7913 Changed = true; 7914 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 7915 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 7916 Pred = ICmpInst::ICMP_ULT; 7917 Changed = true; 7918 } 7919 break; 7920 case ICmpInst::ICMP_UGE: 7921 if (!getUnsignedRangeMin(RHS).isMinValue()) { 7922 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 7923 Pred = ICmpInst::ICMP_UGT; 7924 Changed = true; 7925 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 7926 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7927 SCEV::FlagNUW); 7928 Pred = ICmpInst::ICMP_UGT; 7929 Changed = true; 7930 } 7931 break; 7932 default: 7933 break; 7934 } 7935 7936 // TODO: More simplifications are possible here. 7937 7938 // Recursively simplify until we either hit a recursion limit or nothing 7939 // changes. 7940 if (Changed) 7941 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 7942 7943 return Changed; 7944 7945 trivially_true: 7946 // Return 0 == 0. 7947 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7948 Pred = ICmpInst::ICMP_EQ; 7949 return true; 7950 7951 trivially_false: 7952 // Return 0 != 0. 7953 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7954 Pred = ICmpInst::ICMP_NE; 7955 return true; 7956 } 7957 7958 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 7959 return getSignedRangeMax(S).isNegative(); 7960 } 7961 7962 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 7963 return getSignedRangeMin(S).isStrictlyPositive(); 7964 } 7965 7966 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 7967 return !getSignedRangeMin(S).isNegative(); 7968 } 7969 7970 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 7971 return !getSignedRangeMax(S).isStrictlyPositive(); 7972 } 7973 7974 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 7975 return isKnownNegative(S) || isKnownPositive(S); 7976 } 7977 7978 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 7979 const SCEV *LHS, const SCEV *RHS) { 7980 // Canonicalize the inputs first. 7981 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7982 7983 // If LHS or RHS is an addrec, check to see if the condition is true in 7984 // every iteration of the loop. 7985 // If LHS and RHS are both addrec, both conditions must be true in 7986 // every iteration of the loop. 
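  // Illustrative example (not from the original comments): to show
  // {1,+,1}<L> u< N it suffices that the entry of L is guarded by "1 u< N"
  // (the start value) and its backedge by "{2,+,1} u< N" (the post-increment
  // value); that is exactly what is checked for LAR and RAR below.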
7987 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 7988 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 7989 bool LeftGuarded = false; 7990 bool RightGuarded = false; 7991 if (LAR) { 7992 const Loop *L = LAR->getLoop(); 7993 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && 7994 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { 7995 if (!RAR) return true; 7996 LeftGuarded = true; 7997 } 7998 } 7999 if (RAR) { 8000 const Loop *L = RAR->getLoop(); 8001 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && 8002 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { 8003 if (!LAR) return true; 8004 RightGuarded = true; 8005 } 8006 } 8007 if (LeftGuarded && RightGuarded) 8008 return true; 8009 8010 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 8011 return true; 8012 8013 // Otherwise see what can be done with known constant ranges. 8014 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS); 8015 } 8016 8017 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 8018 ICmpInst::Predicate Pred, 8019 bool &Increasing) { 8020 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 8021 8022 #ifndef NDEBUG 8023 // Verify an invariant: inverting the predicate should turn a monotonically 8024 // increasing change to a monotonically decreasing one, and vice versa. 8025 bool IncreasingSwapped; 8026 bool ResultSwapped = isMonotonicPredicateImpl( 8027 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 8028 8029 assert(Result == ResultSwapped && "should be able to analyze both!"); 8030 if (ResultSwapped) 8031 assert(Increasing == !IncreasingSwapped && 8032 "monotonicity should flip as we flip the predicate"); 8033 #endif 8034 8035 return Result; 8036 } 8037 8038 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 8039 ICmpInst::Predicate Pred, 8040 bool &Increasing) { 8041 8042 // A zero step value for LHS means the induction variable is essentially a 8043 // loop invariant value. We don't really depend on the predicate actually 8044 // flipping from false to true (for increasing predicates, and the other way 8045 // around for decreasing predicates), all we care about is that *if* the 8046 // predicate changes then it only changes from false to true. 8047 // 8048 // A zero step value in itself is not very useful, but there may be places 8049 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 8050 // as general as possible. 
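  // Illustrative example (assumption): for the recurrence {0,+,1}<nuw>, the
  // predicate "AR u> 100" is monotonically increasing: it can flip from
  // false to true as the IV grows, but never back.  The switch below records
  // which predicate/no-wrap combinations have this shape.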
8051 8052 switch (Pred) { 8053 default: 8054 return false; // Conservative answer 8055 8056 case ICmpInst::ICMP_UGT: 8057 case ICmpInst::ICMP_UGE: 8058 case ICmpInst::ICMP_ULT: 8059 case ICmpInst::ICMP_ULE: 8060 if (!LHS->hasNoUnsignedWrap()) 8061 return false; 8062 8063 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8064 return true; 8065 8066 case ICmpInst::ICMP_SGT: 8067 case ICmpInst::ICMP_SGE: 8068 case ICmpInst::ICMP_SLT: 8069 case ICmpInst::ICMP_SLE: { 8070 if (!LHS->hasNoSignedWrap()) 8071 return false; 8072 8073 const SCEV *Step = LHS->getStepRecurrence(*this); 8074 8075 if (isKnownNonNegative(Step)) { 8076 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8077 return true; 8078 } 8079 8080 if (isKnownNonPositive(Step)) { 8081 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8082 return true; 8083 } 8084 8085 return false; 8086 } 8087 8088 } 8089 8090 llvm_unreachable("switch has default clause!"); 8091 } 8092 8093 bool ScalarEvolution::isLoopInvariantPredicate( 8094 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8095 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8096 const SCEV *&InvariantRHS) { 8097 8098 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 8099 if (!isLoopInvariant(RHS, L)) { 8100 if (!isLoopInvariant(LHS, L)) 8101 return false; 8102 8103 std::swap(LHS, RHS); 8104 Pred = ICmpInst::getSwappedPredicate(Pred); 8105 } 8106 8107 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8108 if (!ArLHS || ArLHS->getLoop() != L) 8109 return false; 8110 8111 bool Increasing; 8112 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8113 return false; 8114 8115 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8116 // true as the loop iterates, and the backedge is control dependent on 8117 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8118 // 8119 // * if the predicate was false in the first iteration then the predicate 8120 // is never evaluated again, since the loop exits without taking the 8121 // backedge. 8122 // * if the predicate was true in the first iteration then it will 8123 // continue to be true for all future iterations since it is 8124 // monotonically increasing. 8125 // 8126 // For both the above possibilities, we can replace the loop varying 8127 // predicate with its value on the first iteration of the loop (which is 8128 // loop invariant). 8129 // 8130 // A similar reasoning applies for a monotonically decreasing predicate, by 8131 // replacing true with false and false with true in the above two bullets. 8132 8133 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8134 8135 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8136 return false; 8137 8138 InvariantPred = Pred; 8139 InvariantLHS = ArLHS->getStart(); 8140 InvariantRHS = RHS; 8141 return true; 8142 } 8143 8144 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8145 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8146 if (HasSameValue(LHS, RHS)) 8147 return ICmpInst::isTrueWhenEqual(Pred); 8148 8149 // This code is split out from isKnownPredicate because it is called from 8150 // within isLoopEntryGuardedByCond. 
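  // Worked example (illustrative): if getUnsignedRange(LHS) == [0, 10) and
  // RHS is the constant 10, every value in the LHS range lies in the region
  // satisfying "X u< 10", so ICMP_ULT is known to hold via the range check
  // below.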
8151 8152 auto CheckRanges = 8153 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 8154 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 8155 .contains(RangeLHS); 8156 }; 8157 8158 // The check at the top of the function catches the case where the values are 8159 // known to be equal. 8160 if (Pred == CmpInst::ICMP_EQ) 8161 return false; 8162 8163 if (Pred == CmpInst::ICMP_NE) 8164 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 8165 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 8166 isKnownNonZero(getMinusSCEV(LHS, RHS)); 8167 8168 if (CmpInst::isSigned(Pred)) 8169 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 8170 8171 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 8172 } 8173 8174 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 8175 const SCEV *LHS, 8176 const SCEV *RHS) { 8177 8178 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 8179 // Return Y via OutY. 8180 auto MatchBinaryAddToConst = 8181 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 8182 SCEV::NoWrapFlags ExpectedFlags) { 8183 const SCEV *NonConstOp, *ConstOp; 8184 SCEV::NoWrapFlags FlagsPresent; 8185 8186 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 8187 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 8188 return false; 8189 8190 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 8191 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 8192 }; 8193 8194 APInt C; 8195 8196 switch (Pred) { 8197 default: 8198 break; 8199 8200 case ICmpInst::ICMP_SGE: 8201 std::swap(LHS, RHS); 8202 LLVM_FALLTHROUGH; 8203 case ICmpInst::ICMP_SLE: 8204 // X s<= (X + C)<nsw> if C >= 0 8205 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 8206 return true; 8207 8208 // (X + C)<nsw> s<= X if C <= 0 8209 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 8210 !C.isStrictlyPositive()) 8211 return true; 8212 break; 8213 8214 case ICmpInst::ICMP_SGT: 8215 std::swap(LHS, RHS); 8216 LLVM_FALLTHROUGH; 8217 case ICmpInst::ICMP_SLT: 8218 // X s< (X + C)<nsw> if C > 0 8219 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 8220 C.isStrictlyPositive()) 8221 return true; 8222 8223 // (X + C)<nsw> s< X if C < 0 8224 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 8225 return true; 8226 break; 8227 } 8228 8229 return false; 8230 } 8231 8232 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 8233 const SCEV *LHS, 8234 const SCEV *RHS) { 8235 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 8236 return false; 8237 8238 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 8239 // the stack can result in exponential time complexity. 8240 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 8241 8242 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 8243 // 8244 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 8245 // isKnownPredicate. isKnownPredicate is more powerful, but also more 8246 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 8247 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 8248 // use isKnownPredicate later if needed. 
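  // For instance (illustrative, %n is hypothetical): if RHS is
  // (zext i8 %n to i32), then RHS >= 0 is known from its range, and proving
  // LHS u< RHS reduces to the two signed queries issued below.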
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the
  // stack -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into
  // an infinite loop as we walk up into the dom tree.  These loops do not
  // matter anyway, so we just return a conservative answer when we see them.
8335 if (!DT.isReachableFromEntry(L->getHeader())) 8336 return false; 8337 8338 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 8339 return true; 8340 8341 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 8342 DTN != HeaderDTN; DTN = DTN->getIDom()) { 8343 8344 assert(DTN && "should reach the loop header before reaching the root!"); 8345 8346 BasicBlock *BB = DTN->getBlock(); 8347 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 8348 return true; 8349 8350 BasicBlock *PBB = BB->getSinglePredecessor(); 8351 if (!PBB) 8352 continue; 8353 8354 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 8355 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 8356 continue; 8357 8358 Value *Condition = ContinuePredicate->getCondition(); 8359 8360 // If we have an edge `E` within the loop body that dominates the only 8361 // latch, the condition guarding `E` also guards the backedge. This 8362 // reasoning works only for loops with a single latch. 8363 8364 BasicBlockEdge DominatingEdge(PBB, BB); 8365 if (DominatingEdge.isSingleEdge()) { 8366 // We're constructively (and conservatively) enumerating edges within the 8367 // loop body that dominate the latch. The dominator tree better agree 8368 // with us on this: 8369 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 8370 8371 if (isImpliedCond(Pred, LHS, RHS, Condition, 8372 BB != ContinuePredicate->getSuccessor(0))) 8373 return true; 8374 } 8375 } 8376 8377 return false; 8378 } 8379 8380 bool 8381 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 8382 ICmpInst::Predicate Pred, 8383 const SCEV *LHS, const SCEV *RHS) { 8384 // Interpret a null as meaning no loop, where there is obviously no guard 8385 // (interprocedural conditions notwithstanding). 8386 if (!L) return false; 8387 8388 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 8389 return true; 8390 8391 // Starting at the loop predecessor, climb up the predecessor chain, as long 8392 // as there are predecessors that can be found that have unique successors 8393 // leading to the original header. 8394 for (std::pair<BasicBlock *, BasicBlock *> 8395 Pair(L->getLoopPredecessor(), L->getHeader()); 8396 Pair.first; 8397 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 8398 8399 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 8400 return true; 8401 8402 BranchInst *LoopEntryPredicate = 8403 dyn_cast<BranchInst>(Pair.first->getTerminator()); 8404 if (!LoopEntryPredicate || 8405 LoopEntryPredicate->isUnconditional()) 8406 continue; 8407 8408 if (isImpliedCond(Pred, LHS, RHS, 8409 LoopEntryPredicate->getCondition(), 8410 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8411 return true; 8412 } 8413 8414 // Check conditions due to any @llvm.assume intrinsics. 8415 for (auto &AssumeVH : AC.assumptions()) { 8416 if (!AssumeVH) 8417 continue; 8418 auto *CI = cast<CallInst>(AssumeVH); 8419 if (!DT.dominates(CI, L->getHeader())) 8420 continue; 8421 8422 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8423 return true; 8424 } 8425 8426 return false; 8427 } 8428 8429 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8430 const SCEV *LHS, const SCEV *RHS, 8431 Value *FoundCondValue, 8432 bool Inverse) { 8433 if (!PendingLoopPredicates.insert(FoundCondValue).second) 8434 return false; 8435 8436 auto ClearOnExit = 8437 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 8438 8439 // Recursively handle And and Or conditions. 
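  // For example (illustrative): on the taken edge of "br (A & B)" both A and
  // B hold, so either conjunct alone may imply the goal; with Inverse set,
  // "A | B" is known false, so !A and !B both hold and either operand may
  // again be used.  A false conjunction (or a true disjunction) pins down
  // neither operand, so those combinations are deliberately skipped.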
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
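  // E.g. (illustrative): for i8 values known to lie in [0, 128), "5 u< 100"
  // and "5 s< 100" agree, so an unsigned fact can answer the signed query.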
8531 if (CmpInst::isUnsigned(FoundPred) && 8532 CmpInst::getSignedPredicate(FoundPred) == Pred && 8533 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 8534 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 8535 8536 // Check if we can make progress by sharpening ranges. 8537 if (FoundPred == ICmpInst::ICMP_NE && 8538 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 8539 8540 const SCEVConstant *C = nullptr; 8541 const SCEV *V = nullptr; 8542 8543 if (isa<SCEVConstant>(FoundLHS)) { 8544 C = cast<SCEVConstant>(FoundLHS); 8545 V = FoundRHS; 8546 } else { 8547 C = cast<SCEVConstant>(FoundRHS); 8548 V = FoundLHS; 8549 } 8550 8551 // The guarding predicate tells us that C != V. If the known range 8552 // of V is [C, t), we can sharpen the range to [C + 1, t). The 8553 // range we consider has to correspond to same signedness as the 8554 // predicate we're interested in folding. 8555 8556 APInt Min = ICmpInst::isSigned(Pred) ? 8557 getSignedRangeMin(V) : getUnsignedRangeMin(V); 8558 8559 if (Min == C->getAPInt()) { 8560 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 8561 // This is true even if (Min + 1) wraps around -- in case of 8562 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 8563 8564 APInt SharperMin = Min + 1; 8565 8566 switch (Pred) { 8567 case ICmpInst::ICMP_SGE: 8568 case ICmpInst::ICMP_UGE: 8569 // We know V `Pred` SharperMin. If this implies LHS `Pred` 8570 // RHS, we're done. 8571 if (isImpliedCondOperands(Pred, LHS, RHS, V, 8572 getConstant(SharperMin))) 8573 return true; 8574 LLVM_FALLTHROUGH; 8575 8576 case ICmpInst::ICMP_SGT: 8577 case ICmpInst::ICMP_UGT: 8578 // We know from the range information that (V `Pred` Min || 8579 // V == Min). We know from the guarding condition that !(V 8580 // == Min). This gives us 8581 // 8582 // V `Pred` Min || V == Min && !(V == Min) 8583 // => V `Pred` Min 8584 // 8585 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 8586 8587 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 8588 return true; 8589 LLVM_FALLTHROUGH; 8590 8591 default: 8592 // No change 8593 break; 8594 } 8595 } 8596 } 8597 8598 // Check whether the actual condition is beyond sufficient. 8599 if (FoundPred == ICmpInst::ICMP_EQ) 8600 if (ICmpInst::isTrueWhenEqual(Pred)) 8601 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8602 return true; 8603 if (Pred == ICmpInst::ICMP_NE) 8604 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 8605 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 8606 return true; 8607 8608 // Otherwise assume the worst. 8609 return false; 8610 } 8611 8612 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 8613 const SCEV *&L, const SCEV *&R, 8614 SCEV::NoWrapFlags &Flags) { 8615 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 8616 if (!AE || AE->getNumOperands() != 2) 8617 return false; 8618 8619 L = AE->getOperand(0); 8620 R = AE->getOperand(1); 8621 Flags = AE->getNoWrapFlags(); 8622 return true; 8623 } 8624 8625 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 8626 const SCEV *Less) { 8627 // We avoid subtracting expressions here because this function is usually 8628 // fairly deep in the call stack (i.e. is called many times). 
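  // Illustrative examples (not from the original comments): for
  // More == (10 + %x) and Less == %x the result is 10; for
  // More == {12,+,1}<L> and Less == {2,+,1}<L> the starts are compared and
  // the result is again 10; unrelated expressions yield None.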
8629 8630 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 8631 const auto *LAR = cast<SCEVAddRecExpr>(Less); 8632 const auto *MAR = cast<SCEVAddRecExpr>(More); 8633 8634 if (LAR->getLoop() != MAR->getLoop()) 8635 return None; 8636 8637 // We look at affine expressions only; not for correctness but to keep 8638 // getStepRecurrence cheap. 8639 if (!LAR->isAffine() || !MAR->isAffine()) 8640 return None; 8641 8642 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 8643 return None; 8644 8645 Less = LAR->getStart(); 8646 More = MAR->getStart(); 8647 8648 // fall through 8649 } 8650 8651 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 8652 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 8653 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 8654 return M - L; 8655 } 8656 8657 const SCEV *L, *R; 8658 SCEV::NoWrapFlags Flags; 8659 if (splitBinaryAdd(Less, L, R, Flags)) 8660 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8661 if (R == More) 8662 return -(LC->getAPInt()); 8663 8664 if (splitBinaryAdd(More, L, R, Flags)) 8665 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8666 if (R == Less) 8667 return LC->getAPInt(); 8668 8669 return None; 8670 } 8671 8672 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 8673 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 8674 const SCEV *FoundLHS, const SCEV *FoundRHS) { 8675 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 8676 return false; 8677 8678 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8679 if (!AddRecLHS) 8680 return false; 8681 8682 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 8683 if (!AddRecFoundLHS) 8684 return false; 8685 8686 // We'd like to let SCEV reason about control dependencies, so we constrain 8687 // both the inequalities to be about add recurrences on the same loop. This 8688 // way we can use isLoopEntryGuardedByCond later. 8689 8690 const Loop *L = AddRecFoundLHS->getLoop(); 8691 if (L != AddRecLHS->getLoop()) 8692 return false; 8693 8694 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 8695 // 8696 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 8697 // ... (2) 8698 // 8699 // Informal proof for (2), assuming (1) [*]: 8700 // 8701 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 8702 // 8703 // Then 8704 // 8705 // FoundLHS s< FoundRHS s< INT_MIN - C 8706 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 8707 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 8708 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 8709 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 8710 // <=> FoundLHS + C s< FoundRHS + C 8711 // 8712 // [*]: (1) can be proved by ruling out overflow. 8713 // 8714 // [**]: This can be proved by analyzing all the four possibilities: 8715 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 8716 // (A s>= 0, B s>= 0). 8717 // 8718 // Note: 8719 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 8720 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 8721 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 8722 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 8723 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 8724 // C)". 
8725 8726 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 8727 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 8728 if (!LDiff || !RDiff || *LDiff != *RDiff) 8729 return false; 8730 8731 if (LDiff->isMinValue()) 8732 return true; 8733 8734 APInt FoundRHSLimit; 8735 8736 if (Pred == CmpInst::ICMP_ULT) { 8737 FoundRHSLimit = -(*RDiff); 8738 } else { 8739 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 8740 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 8741 } 8742 8743 // Try to prove (1) or (2), as needed. 8744 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 8745 getConstant(FoundRHSLimit)); 8746 } 8747 8748 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 8749 const SCEV *LHS, const SCEV *RHS, 8750 const SCEV *FoundLHS, 8751 const SCEV *FoundRHS) { 8752 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8753 return true; 8754 8755 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8756 return true; 8757 8758 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 8759 FoundLHS, FoundRHS) || 8760 // ~x < ~y --> x > y 8761 isImpliedCondOperandsHelper(Pred, LHS, RHS, 8762 getNotSCEV(FoundRHS), 8763 getNotSCEV(FoundLHS)); 8764 } 8765 8766 8767 /// If Expr computes ~A, return A else return nullptr 8768 static const SCEV *MatchNotExpr(const SCEV *Expr) { 8769 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 8770 if (!Add || Add->getNumOperands() != 2 || 8771 !Add->getOperand(0)->isAllOnesValue()) 8772 return nullptr; 8773 8774 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 8775 if (!AddRHS || AddRHS->getNumOperands() != 2 || 8776 !AddRHS->getOperand(0)->isAllOnesValue()) 8777 return nullptr; 8778 8779 return AddRHS->getOperand(1); 8780 } 8781 8782 8783 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 8784 template<typename MaxExprType> 8785 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 8786 const SCEV *Candidate) { 8787 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 8788 if (!MaxExpr) return false; 8789 8790 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 8791 } 8792 8793 8794 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 8795 template<typename MaxExprType> 8796 static bool IsMinConsistingOf(ScalarEvolution &SE, 8797 const SCEV *MaybeMinExpr, 8798 const SCEV *Candidate) { 8799 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 8800 if (!MaybeMaxExpr) 8801 return false; 8802 8803 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 8804 } 8805 8806 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 8807 ICmpInst::Predicate Pred, 8808 const SCEV *LHS, const SCEV *RHS) { 8809 8810 // If both sides are affine addrecs for the same loop, with equal 8811 // steps, and we know the recurrences don't wrap, then we only 8812 // need to check the predicate on the starting values. 
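  // Illustrative example (assumption): {2,+,3}<nsw> s< {7,+,3}<nsw> over the
  // same loop reduces to the query "2 s< 7", since both sides advance in
  // lockstep and neither can wrap in the signed sense.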

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV.  Since we are
    // going to compare the operands to RHS, we should be certain that we
    // don't need any size extensions for this.  So let's decline all cases
    // when the sizes of the types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rules:
    //   (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    //   (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.
    using namespace llvm::PatternMatch;
    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions.  In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which
      // would cache the result as SCEVCouldNotCompute in order to break the
      // infinite recursion.  To avoid this, we only want to create SCEVs that
      // are constants in this section.  So we bail if Denominator is not a
      // constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator.  If it is so,
      // then a SCEV for the numerator already exists and matches FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches FoundLHS and the denominator is
      // positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not.  We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      //   FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      //   (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3.  If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      //   (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.  If
      // we divide it by Denominator > 2, then:
      //   1. If FoundLHS is negative, then the result is 0.
      //   2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
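  // Worked example (illustrative): with Pred = u<, FoundRHS == 8 and
  // Addend == 2, FoundLHS lies in [0, 8), so LHS == FoundLHS + 2 lies in
  // [2, 10); if RHS == 10, that range is contained in the region satisfying
  // "LHS u< 10", and the implication below is proved.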
9085 ConstantRange FoundLHSRange = 9086 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9087 9088 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9089 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9090 9091 // We can also compute the range of values for `LHS` that satisfy the 9092 // consequent, "`LHS` `Pred` `RHS`": 9093 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9094 ConstantRange SatisfyingLHSRange = 9095 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9096 9097 // The antecedent implies the consequent if every value of `LHS` that 9098 // satisfies the antecedent also satisfies the consequent. 9099 return SatisfyingLHSRange.contains(LHSRange); 9100 } 9101 9102 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9103 bool IsSigned, bool NoWrap) { 9104 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9105 9106 if (NoWrap) return false; 9107 9108 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9109 const SCEV *One = getOne(Stride->getType()); 9110 9111 if (IsSigned) { 9112 APInt MaxRHS = getSignedRangeMax(RHS); 9113 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9114 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9115 9116 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9117 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9118 } 9119 9120 APInt MaxRHS = getUnsignedRangeMax(RHS); 9121 APInt MaxValue = APInt::getMaxValue(BitWidth); 9122 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9123 9124 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9125 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9126 } 9127 9128 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9129 bool IsSigned, bool NoWrap) { 9130 if (NoWrap) return false; 9131 9132 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9133 const SCEV *One = getOne(Stride->getType()); 9134 9135 if (IsSigned) { 9136 APInt MinRHS = getSignedRangeMin(RHS); 9137 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9138 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9139 9140 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9141 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9142 } 9143 9144 APInt MinRHS = getUnsignedRangeMin(RHS); 9145 APInt MinValue = APInt::getMinValue(BitWidth); 9146 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9147 9148 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9149 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9150 } 9151 9152 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9153 bool Equality) { 9154 const SCEV *One = getOne(Step->getType()); 9155 Delta = Equality ? 
getAddExpr(Delta, Step)
                    : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV < Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove the
    // correctness of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop has a single exit and no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow.
    // Relaxed no-overflow conditions exploit NoWrapFlags, allowing us to
    // optimize in the presence of undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    // Calculate the maximum backedge count based on the range of values
    // permitted by Start, End, and Stride.
    APInt MinStart = IsSigned ? getSignedRangeMin(Start)
                              : getUnsignedRangeMin(Start);

    unsigned BitWidth = getTypeSizeInBits(LHS->getType());

    APInt StrideForMaxBECount;

    if (PositiveStride)
      StrideForMaxBECount =
          IsSigned ? getSignedRangeMin(Stride)
                   : getUnsignedRangeMin(Stride);
    else
      // Using a stride of 1 is safe when computing max backedge taken count
      // for a loop with unknown stride.
      StrideForMaxBECount = APInt(BitWidth, 1, IsSigned);

    APInt Limit =
        IsSigned
            ? APInt::getSignedMaxValue(BitWidth) - (StrideForMaxBECount - 1)
            : APInt::getMaxValue(BitWidth) - (StrideForMaxBECount - 1);

    // Although End can be a MAX expression we estimate MaxEnd considering only
    // the case End = RHS. This is safe because in the other case (End - Start)
    // is zero, leading to a zero maximum backedge taken count.
    APInt MaxEnd =
        IsSigned ? APIntOps::smin(getSignedRangeMax(RHS), Limit)
                 : APIntOps::umin(getUnsignedRangeMax(RHS), Limit);

    MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
                                getConstant(StrideForMaxBECount), false);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ?
APIntOps::smax(getSignedRangeMin(RHS), Limit) 9384 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 9385 9386 9387 const SCEV *MaxBECount = getCouldNotCompute(); 9388 if (isa<SCEVConstant>(BECount)) 9389 MaxBECount = BECount; 9390 else 9391 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 9392 getConstant(MinStride), false); 9393 9394 if (isa<SCEVCouldNotCompute>(MaxBECount)) 9395 MaxBECount = BECount; 9396 9397 return ExitLimit(BECount, MaxBECount, false, Predicates); 9398 } 9399 9400 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 9401 ScalarEvolution &SE) const { 9402 if (Range.isFullSet()) // Infinite loop. 9403 return SE.getCouldNotCompute(); 9404 9405 // If the start is a non-zero constant, shift the range to simplify things. 9406 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 9407 if (!SC->getValue()->isZero()) { 9408 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 9409 Operands[0] = SE.getZero(SC->getType()); 9410 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 9411 getNoWrapFlags(FlagNW)); 9412 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 9413 return ShiftedAddRec->getNumIterationsInRange( 9414 Range.subtract(SC->getAPInt()), SE); 9415 // This is strange and shouldn't happen. 9416 return SE.getCouldNotCompute(); 9417 } 9418 9419 // The only time we can solve this is when we have all constant indices. 9420 // Otherwise, we cannot determine the overflow conditions. 9421 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 9422 return SE.getCouldNotCompute(); 9423 9424 // Okay at this point we know that all elements of the chrec are constants and 9425 // that the start element is zero. 9426 9427 // First check to see if the range contains zero. If not, the first 9428 // iteration exits. 9429 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 9430 if (!Range.contains(APInt(BitWidth, 0))) 9431 return SE.getZero(getType()); 9432 9433 if (isAffine()) { 9434 // If this is an affine expression then we have this situation: 9435 // Solve {0,+,A} in Range === Ax in Range 9436 9437 // We know that zero is in the range. If A is positive then we know that 9438 // the upper value of the range must be the first possible exit value. 9439 // If A is negative then the lower of the range is the last possible loop 9440 // value. Also note that we already checked for a full range. 9441 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 9442 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 9443 9444 // The exit value should be (End+A)/A. 9445 APInt ExitVal = (End + A).udiv(A); 9446 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 9447 9448 // Evaluate at the exit value. If we really did fall out of the valid 9449 // range, then we computed our trip count, otherwise wrap around or other 9450 // things must have happened. 9451 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 9452 if (Range.contains(Val->getValue())) 9453 return SE.getCouldNotCompute(); // Something strange happened 9454 9455 // Ensure that the previous value is in the range. This is a sanity check. 
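    // A worked instance for illustration (numbers chosen for exposition, not
    // taken from the surrounding code): for the chrec {0,+,2} and
    // Range = [0, 5), A = 2 and End = 4, so ExitVal = (4 + 2) /u 2 = 3.
    // Evaluating the chrec at 3 gives 6, outside the range, while evaluating
    // at ExitVal - 1 = 2 gives 4, still inside -- which is exactly what the
    // assertion below checks.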
9456 assert(Range.contains( 9457 EvaluateConstantChrecAtConstant(this, 9458 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 9459 "Linear scev computation is off in a bad way!"); 9460 return SE.getConstant(ExitValue); 9461 } else if (isQuadratic()) { 9462 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 9463 // quadratic equation to solve it. To do this, we must frame our problem in 9464 // terms of figuring out when zero is crossed, instead of when 9465 // Range.getUpper() is crossed. 9466 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 9467 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 9468 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap); 9469 9470 // Next, solve the constructed addrec 9471 if (auto Roots = 9472 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) { 9473 const SCEVConstant *R1 = Roots->first; 9474 const SCEVConstant *R2 = Roots->second; 9475 // Pick the smallest positive root value. 9476 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 9477 ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 9478 if (!CB->getZExtValue()) 9479 std::swap(R1, R2); // R1 is the minimum root now. 9480 9481 // Make sure the root is not off by one. The returned iteration should 9482 // not be in the range, but the previous one should be. When solving 9483 // for "X*X < 5", for example, we should not return a root of 2. 9484 ConstantInt *R1Val = 9485 EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); 9486 if (Range.contains(R1Val->getValue())) { 9487 // The next iteration must be out of the range... 9488 ConstantInt *NextVal = 9489 ConstantInt::get(SE.getContext(), R1->getAPInt() + 1); 9490 9491 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 9492 if (!Range.contains(R1Val->getValue())) 9493 return SE.getConstant(NextVal); 9494 return SE.getCouldNotCompute(); // Something strange happened 9495 } 9496 9497 // If R1 was not in the range, then it is a good return value. Make 9498 // sure that R1-1 WAS in the range though, just in case. 9499 ConstantInt *NextVal = 9500 ConstantInt::get(SE.getContext(), R1->getAPInt() - 1); 9501 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 9502 if (Range.contains(R1Val->getValue())) 9503 return R1; 9504 return SE.getCouldNotCompute(); // Something strange happened 9505 } 9506 } 9507 } 9508 9509 return SE.getCouldNotCompute(); 9510 } 9511 9512 // Return true when S contains at least an undef value. 9513 static inline bool containsUndefs(const SCEV *S) { 9514 return SCEVExprContains(S, [](const SCEV *S) { 9515 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 9516 return isa<UndefValue>(SU->getValue()); 9517 else if (const auto *SC = dyn_cast<SCEVConstant>(S)) 9518 return isa<UndefValue>(SC->getValue()); 9519 return false; 9520 }); 9521 } 9522 9523 namespace { 9524 // Collect all steps of SCEV expressions. 9525 struct SCEVCollectStrides { 9526 ScalarEvolution &SE; 9527 SmallVectorImpl<const SCEV *> &Strides; 9528 9529 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 9530 : SE(SE), Strides(S) {} 9531 9532 bool follow(const SCEV *S) { 9533 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 9534 Strides.push_back(AR->getStepRecurrence(SE)); 9535 return true; 9536 } 9537 bool isDone() const { return false; } 9538 }; 9539 9540 // Collect all SCEVUnknown and SCEVMulExpr expressions. 
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
      : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};
} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
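///
/// For instance (an illustrative sketch, not a case taken from the code):
/// for {%base,+,(8 * %m)}<%loop>, the stride walk collects the term
/// (8 * %m); such terms are later divided and sorted by findArrayDimensions
/// to guess the sizes of the array dimensions.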
9643 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 9644 SmallVectorImpl<const SCEV *> &Terms) { 9645 SmallVector<const SCEV *, 4> Strides; 9646 SCEVCollectStrides StrideCollector(*this, Strides); 9647 visitAll(Expr, StrideCollector); 9648 9649 DEBUG({ 9650 dbgs() << "Strides:\n"; 9651 for (const SCEV *S : Strides) 9652 dbgs() << *S << "\n"; 9653 }); 9654 9655 for (const SCEV *S : Strides) { 9656 SCEVCollectTerms TermCollector(Terms); 9657 visitAll(S, TermCollector); 9658 } 9659 9660 DEBUG({ 9661 dbgs() << "Terms:\n"; 9662 for (const SCEV *T : Terms) 9663 dbgs() << *T << "\n"; 9664 }); 9665 9666 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 9667 visitAll(Expr, MulCollector); 9668 } 9669 9670 static bool findArrayDimensionsRec(ScalarEvolution &SE, 9671 SmallVectorImpl<const SCEV *> &Terms, 9672 SmallVectorImpl<const SCEV *> &Sizes) { 9673 int Last = Terms.size() - 1; 9674 const SCEV *Step = Terms[Last]; 9675 9676 // End of recursion. 9677 if (Last == 0) { 9678 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 9679 SmallVector<const SCEV *, 2> Qs; 9680 for (const SCEV *Op : M->operands()) 9681 if (!isa<SCEVConstant>(Op)) 9682 Qs.push_back(Op); 9683 9684 Step = SE.getMulExpr(Qs); 9685 } 9686 9687 Sizes.push_back(Step); 9688 return true; 9689 } 9690 9691 for (const SCEV *&Term : Terms) { 9692 // Normalize the terms before the next call to findArrayDimensionsRec. 9693 const SCEV *Q, *R; 9694 SCEVDivision::divide(SE, Term, Step, &Q, &R); 9695 9696 // Bail out when GCD does not evenly divide one of the terms. 9697 if (!R->isZero()) 9698 return false; 9699 9700 Term = Q; 9701 } 9702 9703 // Remove all SCEVConstants. 9704 Terms.erase( 9705 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 9706 Terms.end()); 9707 9708 if (Terms.size() > 0) 9709 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 9710 return false; 9711 9712 Sizes.push_back(Step); 9713 return true; 9714 } 9715 9716 9717 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 9718 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 9719 for (const SCEV *T : Terms) 9720 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 9721 return true; 9722 return false; 9723 } 9724 9725 // Return the number of product terms in S. 9726 static inline int numberOfTerms(const SCEV *S) { 9727 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 9728 return Expr->getNumOperands(); 9729 return 1; 9730 } 9731 9732 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 9733 if (isa<SCEVConstant>(T)) 9734 return nullptr; 9735 9736 if (isa<SCEVUnknown>(T)) 9737 return T; 9738 9739 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 9740 SmallVector<const SCEV *, 2> Factors; 9741 for (const SCEV *Op : M->operands()) 9742 if (!isa<SCEVConstant>(Op)) 9743 Factors.push_back(Op); 9744 9745 return SE.getMulExpr(Factors); 9746 } 9747 9748 return T; 9749 } 9750 9751 /// Return the size of an element read or written by Inst. 
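/// For example (illustrative; assumes a DataLayout in which double is 8
/// bytes): for "store double %x, double* %p" this returns a SCEV equal to 8.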
9752 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 9753 Type *Ty; 9754 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 9755 Ty = Store->getValueOperand()->getType(); 9756 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 9757 Ty = Load->getType(); 9758 else 9759 return nullptr; 9760 9761 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 9762 return getSizeOfExpr(ETy, Ty); 9763 } 9764 9765 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 9766 SmallVectorImpl<const SCEV *> &Sizes, 9767 const SCEV *ElementSize) { 9768 if (Terms.size() < 1 || !ElementSize) 9769 return; 9770 9771 // Early return when Terms do not contain parameters: we do not delinearize 9772 // non parametric SCEVs. 9773 if (!containsParameters(Terms)) 9774 return; 9775 9776 DEBUG({ 9777 dbgs() << "Terms:\n"; 9778 for (const SCEV *T : Terms) 9779 dbgs() << *T << "\n"; 9780 }); 9781 9782 // Remove duplicates. 9783 array_pod_sort(Terms.begin(), Terms.end()); 9784 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 9785 9786 // Put larger terms first. 9787 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 9788 return numberOfTerms(LHS) > numberOfTerms(RHS); 9789 }); 9790 9791 // Try to divide all terms by the element size. If term is not divisible by 9792 // element size, proceed with the original term. 9793 for (const SCEV *&Term : Terms) { 9794 const SCEV *Q, *R; 9795 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 9796 if (!Q->isZero()) 9797 Term = Q; 9798 } 9799 9800 SmallVector<const SCEV *, 4> NewTerms; 9801 9802 // Remove constant factors. 9803 for (const SCEV *T : Terms) 9804 if (const SCEV *NewT = removeConstantFactors(*this, T)) 9805 NewTerms.push_back(NewT); 9806 9807 DEBUG({ 9808 dbgs() << "Terms after sorting:\n"; 9809 for (const SCEV *T : NewTerms) 9810 dbgs() << *T << "\n"; 9811 }); 9812 9813 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 9814 Sizes.clear(); 9815 return; 9816 } 9817 9818 // The last element to be pushed into Sizes is the size of an element. 9819 Sizes.push_back(ElementSize); 9820 9821 DEBUG({ 9822 dbgs() << "Sizes:\n"; 9823 for (const SCEV *S : Sizes) 9824 dbgs() << *S << "\n"; 9825 }); 9826 } 9827 9828 void ScalarEvolution::computeAccessFunctions( 9829 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 9830 SmallVectorImpl<const SCEV *> &Sizes) { 9831 9832 // Early exit in case this SCEV is not an affine multivariate function. 9833 if (Sizes.empty()) 9834 return; 9835 9836 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 9837 if (!AR->isAffine()) 9838 return; 9839 9840 const SCEV *Res = Expr; 9841 int Last = Sizes.size() - 1; 9842 for (int i = Last; i >= 0; i--) { 9843 const SCEV *Q, *R; 9844 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 9845 9846 DEBUG({ 9847 dbgs() << "Res: " << *Res << "\n"; 9848 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 9849 dbgs() << "Res divided by Sizes[i]:\n"; 9850 dbgs() << "Quotient: " << *Q << "\n"; 9851 dbgs() << "Remainder: " << *R << "\n"; 9852 }); 9853 9854 Res = Q; 9855 9856 // Do not record the last subscript corresponding to the size of elements in 9857 // the array. 9858 if (i == Last) { 9859 9860 // Bail out if the remainder is too complex. 9861 if (isa<SCEVAddRecExpr>(R)) { 9862 Subscripts.clear(); 9863 Sizes.clear(); 9864 return; 9865 } 9866 9867 continue; 9868 } 9869 9870 // Record the access function for the current subscript. 
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of sub
/// expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of
/// that dimension by dividing the overall size of the array by the size of
/// the known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The test cases check the output of a function pass, DelinearizationPass,
/// that walks through all loads and stores of a function asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
9954 computeAccessFunctions(Expr, Subscripts, Sizes); 9955 9956 if (Subscripts.empty()) 9957 return; 9958 9959 DEBUG({ 9960 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 9961 dbgs() << "ArrayDecl[UnknownSize]"; 9962 for (const SCEV *S : Sizes) 9963 dbgs() << "[" << *S << "]"; 9964 9965 dbgs() << "\nArrayRef"; 9966 for (const SCEV *S : Subscripts) 9967 dbgs() << "[" << *S << "]"; 9968 dbgs() << "\n"; 9969 }); 9970 } 9971 9972 //===----------------------------------------------------------------------===// 9973 // SCEVCallbackVH Class Implementation 9974 //===----------------------------------------------------------------------===// 9975 9976 void ScalarEvolution::SCEVCallbackVH::deleted() { 9977 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9978 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 9979 SE->ConstantEvolutionLoopExitValue.erase(PN); 9980 SE->eraseValueFromMap(getValPtr()); 9981 // this now dangles! 9982 } 9983 9984 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 9985 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9986 9987 // Forget all the expressions associated with users of the old value, 9988 // so that future queries will recompute the expressions using the new 9989 // value. 9990 Value *Old = getValPtr(); 9991 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 9992 SmallPtrSet<User *, 8> Visited; 9993 while (!Worklist.empty()) { 9994 User *U = Worklist.pop_back_val(); 9995 // Deleting the Old value will cause this to dangle. Postpone 9996 // that until everything else is done. 9997 if (U == Old) 9998 continue; 9999 if (!Visited.insert(U).second) 10000 continue; 10001 if (PHINode *PN = dyn_cast<PHINode>(U)) 10002 SE->ConstantEvolutionLoopExitValue.erase(PN); 10003 SE->eraseValueFromMap(U); 10004 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 10005 } 10006 // Delete the Old value. 10007 if (PHINode *PN = dyn_cast<PHINode>(Old)) 10008 SE->ConstantEvolutionLoopExitValue.erase(PN); 10009 SE->eraseValueFromMap(Old); 10010 // this now dangles! 10011 } 10012 10013 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 10014 : CallbackVH(V), SE(se) {} 10015 10016 //===----------------------------------------------------------------------===// 10017 // ScalarEvolution Class Implementation 10018 //===----------------------------------------------------------------------===// 10019 10020 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 10021 AssumptionCache &AC, DominatorTree &DT, 10022 LoopInfo &LI) 10023 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 10024 CouldNotCompute(new SCEVCouldNotCompute()), 10025 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10026 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 10027 FirstUnknown(nullptr) { 10028 10029 // To use guards for proving predicates, we need to scan every instruction in 10030 // relevant basic blocks, and not just terminators. Doing this is a waste of 10031 // time if the IR does not actually contain any calls to 10032 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 10033 // 10034 // This pessimizes the case where a pass that preserves ScalarEvolution wants 10035 // to _add_ guards to the module when there weren't any before, and wants 10036 // ScalarEvolution to optimize based on those guards. For now we prefer to be 10037 // efficient in lieu of being smart in that rather obscure case. 
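  // For reference, a call to the guard intrinsic looks like this in the IR
  // (illustrative syntax):
  //
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  //
  // The code below only checks whether such a declaration exists and has
  // uses; it does not inspect the call sites themselves.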
10038 10039 auto *GuardDecl = F.getParent()->getFunction( 10040 Intrinsic::getName(Intrinsic::experimental_guard)); 10041 HasGuards = GuardDecl && !GuardDecl->use_empty(); 10042 } 10043 10044 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 10045 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 10046 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 10047 ValueExprMap(std::move(Arg.ValueExprMap)), 10048 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 10049 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10050 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 10051 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 10052 PredicatedBackedgeTakenCounts( 10053 std::move(Arg.PredicatedBackedgeTakenCounts)), 10054 ConstantEvolutionLoopExitValue( 10055 std::move(Arg.ConstantEvolutionLoopExitValue)), 10056 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 10057 LoopDispositions(std::move(Arg.LoopDispositions)), 10058 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 10059 BlockDispositions(std::move(Arg.BlockDispositions)), 10060 UnsignedRanges(std::move(Arg.UnsignedRanges)), 10061 SignedRanges(std::move(Arg.SignedRanges)), 10062 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 10063 UniquePreds(std::move(Arg.UniquePreds)), 10064 SCEVAllocator(std::move(Arg.SCEVAllocator)), 10065 FirstUnknown(Arg.FirstUnknown) { 10066 Arg.FirstUnknown = nullptr; 10067 } 10068 10069 ScalarEvolution::~ScalarEvolution() { 10070 // Iterate through all the SCEVUnknown instances and call their 10071 // destructors, so that they release their references to their values. 10072 for (SCEVUnknown *U = FirstUnknown; U;) { 10073 SCEVUnknown *Tmp = U; 10074 U = U->Next; 10075 Tmp->~SCEVUnknown(); 10076 } 10077 FirstUnknown = nullptr; 10078 10079 ExprValueMap.clear(); 10080 ValueExprMap.clear(); 10081 HasRecMap.clear(); 10082 10083 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 10084 // that a loop had multiple computable exits. 10085 for (auto &BTCI : BackedgeTakenCounts) 10086 BTCI.second.clear(); 10087 for (auto &BTCI : PredicatedBackedgeTakenCounts) 10088 BTCI.second.clear(); 10089 10090 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 10091 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 10092 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 10093 } 10094 10095 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 10096 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 10097 } 10098 10099 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 10100 const Loop *L) { 10101 // Print all inner loops first 10102 for (Loop *I : *L) 10103 PrintLoopInfo(OS, SE, I); 10104 10105 OS << "Loop "; 10106 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10107 OS << ": "; 10108 10109 SmallVector<BasicBlock *, 8> ExitBlocks; 10110 L->getExitBlocks(ExitBlocks); 10111 if (ExitBlocks.size() != 1) 10112 OS << "<multiple exits> "; 10113 10114 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10115 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 10116 } else { 10117 OS << "Unpredictable backedge-taken count. 
"; 10118 } 10119 10120 OS << "\n" 10121 "Loop "; 10122 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10123 OS << ": "; 10124 10125 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 10126 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 10127 if (SE->isBackedgeTakenCountMaxOrZero(L)) 10128 OS << ", actual taken count either this or zero."; 10129 } else { 10130 OS << "Unpredictable max backedge-taken count. "; 10131 } 10132 10133 OS << "\n" 10134 "Loop "; 10135 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10136 OS << ": "; 10137 10138 SCEVUnionPredicate Pred; 10139 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 10140 if (!isa<SCEVCouldNotCompute>(PBT)) { 10141 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 10142 OS << " Predicates:\n"; 10143 Pred.print(OS, 4); 10144 } else { 10145 OS << "Unpredictable predicated backedge-taken count. "; 10146 } 10147 OS << "\n"; 10148 10149 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10150 OS << "Loop "; 10151 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10152 OS << ": "; 10153 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 10154 } 10155 } 10156 10157 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 10158 switch (LD) { 10159 case ScalarEvolution::LoopVariant: 10160 return "Variant"; 10161 case ScalarEvolution::LoopInvariant: 10162 return "Invariant"; 10163 case ScalarEvolution::LoopComputable: 10164 return "Computable"; 10165 } 10166 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 10167 } 10168 10169 void ScalarEvolution::print(raw_ostream &OS) const { 10170 // ScalarEvolution's implementation of the print method is to print 10171 // out SCEV values of all instructions that are interesting. Doing 10172 // this potentially causes it to create new SCEV objects though, 10173 // which technically conflicts with the const qualifier. This isn't 10174 // observable from outside the class though, so casting away the 10175 // const isn't dangerous. 
10176 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 10177 10178 OS << "Classifying expressions for: "; 10179 F.printAsOperand(OS, /*PrintType=*/false); 10180 OS << "\n"; 10181 for (Instruction &I : instructions(F)) 10182 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 10183 OS << I << '\n'; 10184 OS << " --> "; 10185 const SCEV *SV = SE.getSCEV(&I); 10186 SV->print(OS); 10187 if (!isa<SCEVCouldNotCompute>(SV)) { 10188 OS << " U: "; 10189 SE.getUnsignedRange(SV).print(OS); 10190 OS << " S: "; 10191 SE.getSignedRange(SV).print(OS); 10192 } 10193 10194 const Loop *L = LI.getLoopFor(I.getParent()); 10195 10196 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 10197 if (AtUse != SV) { 10198 OS << " --> "; 10199 AtUse->print(OS); 10200 if (!isa<SCEVCouldNotCompute>(AtUse)) { 10201 OS << " U: "; 10202 SE.getUnsignedRange(AtUse).print(OS); 10203 OS << " S: "; 10204 SE.getSignedRange(AtUse).print(OS); 10205 } 10206 } 10207 10208 if (L) { 10209 OS << "\t\t" "Exits: "; 10210 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 10211 if (!SE.isLoopInvariant(ExitValue, L)) { 10212 OS << "<<Unknown>>"; 10213 } else { 10214 OS << *ExitValue; 10215 } 10216 10217 bool First = true; 10218 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 10219 if (First) { 10220 OS << "\t\t" "LoopDispositions: { "; 10221 First = false; 10222 } else { 10223 OS << ", "; 10224 } 10225 10226 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10227 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 10228 } 10229 10230 for (auto *InnerL : depth_first(L)) { 10231 if (InnerL == L) 10232 continue; 10233 if (First) { 10234 OS << "\t\t" "LoopDispositions: { "; 10235 First = false; 10236 } else { 10237 OS << ", "; 10238 } 10239 10240 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10241 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 10242 } 10243 10244 OS << " }"; 10245 } 10246 10247 OS << "\n"; 10248 } 10249 10250 OS << "Determining loop execution counts for: "; 10251 F.printAsOperand(OS, /*PrintType=*/false); 10252 OS << "\n"; 10253 for (Loop *I : LI) 10254 PrintLoopInfo(OS, &SE, I); 10255 } 10256 10257 ScalarEvolution::LoopDisposition 10258 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 10259 auto &Values = LoopDispositions[S]; 10260 for (auto &V : Values) { 10261 if (V.getPointer() == L) 10262 return V.getInt(); 10263 } 10264 Values.emplace_back(L, LoopVariant); 10265 LoopDisposition D = computeLoopDisposition(S, L); 10266 auto &Values2 = LoopDispositions[S]; 10267 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 10268 if (V.getPointer() == L) { 10269 V.setInt(D); 10270 break; 10271 } 10272 } 10273 return D; 10274 } 10275 10276 ScalarEvolution::LoopDisposition 10277 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 10278 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 10279 case scConstant: 10280 return LoopInvariant; 10281 case scTruncate: 10282 case scZeroExtend: 10283 case scSignExtend: 10284 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 10285 case scAddRecExpr: { 10286 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 10287 10288 // If L is the addrec's loop, it's computable. 10289 if (AR->getLoop() == L) 10290 return LoopComputable; 10291 10292 // Add recurrences are never invariant in the function-body (null loop). 10293 if (!L) 10294 return LoopVariant; 10295 10296 // This recurrence is variant w.r.t. L if L contains AR's loop. 
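    // (Illustrative example: {0,+,1}<%inner> is not invariant in an outer
    // loop that contains %inner, because its value changes on every %inner
    // iteration executed inside the outer loop.)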
10297 if (L->contains(AR->getLoop())) 10298 return LoopVariant; 10299 10300 // This recurrence is invariant w.r.t. L if AR's loop contains L. 10301 if (AR->getLoop()->contains(L)) 10302 return LoopInvariant; 10303 10304 // This recurrence is variant w.r.t. L if any of its operands 10305 // are variant. 10306 for (auto *Op : AR->operands()) 10307 if (!isLoopInvariant(Op, L)) 10308 return LoopVariant; 10309 10310 // Otherwise it's loop-invariant. 10311 return LoopInvariant; 10312 } 10313 case scAddExpr: 10314 case scMulExpr: 10315 case scUMaxExpr: 10316 case scSMaxExpr: { 10317 bool HasVarying = false; 10318 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 10319 LoopDisposition D = getLoopDisposition(Op, L); 10320 if (D == LoopVariant) 10321 return LoopVariant; 10322 if (D == LoopComputable) 10323 HasVarying = true; 10324 } 10325 return HasVarying ? LoopComputable : LoopInvariant; 10326 } 10327 case scUDivExpr: { 10328 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 10329 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 10330 if (LD == LoopVariant) 10331 return LoopVariant; 10332 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 10333 if (RD == LoopVariant) 10334 return LoopVariant; 10335 return (LD == LoopInvariant && RD == LoopInvariant) ? 10336 LoopInvariant : LoopComputable; 10337 } 10338 case scUnknown: 10339 // All non-instruction values are loop invariant. All instructions are loop 10340 // invariant if they are not contained in the specified loop. 10341 // Instructions are never considered invariant in the function body 10342 // (null loop) because they are defined within the "loop". 10343 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 10344 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 10345 return LoopInvariant; 10346 case scCouldNotCompute: 10347 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 10348 } 10349 llvm_unreachable("Unknown SCEV kind!"); 10350 } 10351 10352 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 10353 return getLoopDisposition(S, L) == LoopInvariant; 10354 } 10355 10356 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 10357 return getLoopDisposition(S, L) == LoopComputable; 10358 } 10359 10360 ScalarEvolution::BlockDisposition 10361 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 10362 auto &Values = BlockDispositions[S]; 10363 for (auto &V : Values) { 10364 if (V.getPointer() == BB) 10365 return V.getInt(); 10366 } 10367 Values.emplace_back(BB, DoesNotDominateBlock); 10368 BlockDisposition D = computeBlockDisposition(S, BB); 10369 auto &Values2 = BlockDispositions[S]; 10370 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 10371 if (V.getPointer() == BB) { 10372 V.setInt(D); 10373 break; 10374 } 10375 } 10376 return D; 10377 } 10378 10379 ScalarEvolution::BlockDisposition 10380 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 10381 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 10382 case scConstant: 10383 return ProperlyDominatesBlock; 10384 case scTruncate: 10385 case scZeroExtend: 10386 case scSignExtend: 10387 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 10388 case scAddRecExpr: { 10389 // This uses a "dominates" query instead of "properly dominates" query 10390 // to test for proper dominance too, because the instruction which 10391 // produces the addrec's value is a PHI, and a PHI effectively properly 10392 // dominates 
its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
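  // (SCEVs are uniqued per ScalarEvolution instance, so expressions created
  // by `this` must be re-created in SE2's "universe" before the two
  // backedge-taken counts can be meaningfully compared below.)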
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }
    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a
      // loop go from "undef" to "undef+1" (say). The transform is fine, since
      // in both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
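  // For example (an illustrative note): a pass that returns
  // PreservedAnalyses::none(), or one that preserves ScalarEvolution itself
  // but invalidates LoopAnalysis, both cause this to return true and the
  // cached ScalarEvolution object to be discarded.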
10557 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 10558 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 10559 Inv.invalidate<AssumptionAnalysis>(F, PA) || 10560 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 10561 Inv.invalidate<LoopAnalysis>(F, PA); 10562 } 10563 10564 AnalysisKey ScalarEvolutionAnalysis::Key; 10565 10566 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 10567 FunctionAnalysisManager &AM) { 10568 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 10569 AM.getResult<AssumptionAnalysis>(F), 10570 AM.getResult<DominatorTreeAnalysis>(F), 10571 AM.getResult<LoopAnalysis>(F)); 10572 } 10573 10574 PreservedAnalyses 10575 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 10576 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 10577 return PreservedAnalyses::all(); 10578 } 10579 10580 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 10581 "Scalar Evolution Analysis", false, true) 10582 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10583 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 10584 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 10585 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 10586 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 10587 "Scalar Evolution Analysis", false, true) 10588 char ScalarEvolutionWrapperPass::ID = 0; 10589 10590 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 10591 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 10592 } 10593 10594 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 10595 SE.reset(new ScalarEvolution( 10596 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 10597 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 10598 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 10599 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 10600 return false; 10601 } 10602 10603 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 10604 10605 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 10606 SE->print(OS); 10607 } 10608 10609 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 10610 if (!VerifySCEV) 10611 return; 10612 10613 SE->verify(); 10614 } 10615 10616 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 10617 AU.setPreservesAll(); 10618 AU.addRequiredTransitive<AssumptionCacheTracker>(); 10619 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 10620 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 10621 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 10622 } 10623 10624 const SCEVPredicate * 10625 ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS, 10626 const SCEVConstant *RHS) { 10627 FoldingSetNodeID ID; 10628 // Unique this node based on the arguments 10629 ID.AddInteger(SCEVPredicate::P_Equal); 10630 ID.AddPointer(LHS); 10631 ID.AddPointer(RHS); 10632 void *IP = nullptr; 10633 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10634 return S; 10635 SCEVEqualPredicate *Eq = new (SCEVAllocator) 10636 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 10637 UniquePreds.InsertNode(Eq, IP); 10638 return Eq; 10639 } 10640 10641 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 10642 const SCEVAddRecExpr *AR, 10643 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10644 FoldingSetNodeID ID; 10645 // Unique this node based on the arguments 10646 ID.AddInteger(SCEVPredicate::P_Wrap); 10647 
const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }

    return Expr;
  }
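
  // Worked example for the two extend visitors below (values illustrative):
  // given a 32-bit recurrence {%start,+,%step}<%loop> that is zero-extended
  // to i64, the extension cannot be pushed into the recurrence unless the
  // increment is known not to wrap. Rather than giving up, visitZeroExtendExpr
  // records an IncrementNUSW assumption and returns
  //   {(zext i32 %start to i64),+,(sext i32 %step to i64)}<%loop>
  // i.e. an AddRec in the wider type that is valid only if the recorded
  // assumption is enforced, typically by an emitted run-time overflow check.
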
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(A);
    }
    NewPreds->insert(A);
    return true;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};
} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {

  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEVUnknown *LHS,
                                       const SCEVConstant *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}
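
// A wrap predicate needs a run-time check only for the flag bits that SCEV
// cannot prove statically. For example (illustrative), for an AddRec
// {0,+,1}<nuw><nsw><%loop>, getImpliedFlags returns
// IncrementNSSW | IncrementNUSW: NSW transfers directly as NSSW, and NUW
// combined with the non-negative constant step 1 yields NUSW. A <nusw>
// assumption requested on such an AddRec is thus already satisfied.
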
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply
    // the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for them.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {}
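
// Minimal usage sketch (hypothetical caller such as a loop transform;
// materializing the gathered predicates as run-time checks is the caller's
// responsibility, e.g. through the SCEV expander):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEVAddRecExpr *PtrAR = PSE.getAsAddRec(Ptr); // may add predicates
//   const SCEV *BTC = PSE.getBackedgeTakenCount();      // likewise
//   const SCEVUnionPredicate &Checks = PSE.getUnionPredicate();
//   // 'Checks' must hold at run time for PtrAR and BTC to be valid.
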
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // If we found an entry but it is stale, rewrite the stale entry according
  // to the current set of predicates.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}
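
// print() below pairs each instruction whose SCEV was actually rewritten with
// its original and predicated expressions; sample output (illustrative):
//
//   [PSE] %wide = zext i32 %iv to i64:
//     (zext i32 {0,+,%step}<%loop> to i64)
//     --> {0,+,(sext i32 %step to i64)}<%loop>
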
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
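
// To exercise this printing from the command line, the standard opt
// invocations are:
//
//   opt -analyze -scalar-evolution test.ll                        (legacy PM)
//   opt -passes='print<scalar-evolution>' -disable-output test.ll (new PM)
//
// Both routes reach ScalarEvolution::print, via ScalarEvolutionWrapperPass or
// ScalarEvolutionPrinterPass respectively.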