//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
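
// Illustrative sketch (an addition, not part of the original header): because
// SCEVs are uniqued in a FoldingSet, structurally identical expressions are
// the same object and may be compared by pointer. Given a ScalarEvolution &SE
// and Values X and Y:
//
//   const SCEV *A = SE.getSCEV(X);
//   const SCEV *B = SE.getSCEV(Y);
//   // getAddExpr canonicalizes operand order, so both orders fold to the
//   // same node:
//   assert(SE.getAddExpr(A, B) == SE.getAddExpr(B, A));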
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant-derived "
                                 "loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps",
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(1000));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned>
    MaxCompareDepth("scalar-evolution-max-compare-depth", cl::Hidden,
                    cl::desc("Maximum depth of recursive compare complexity"),
                    cl::init(32));

static cl::opt<unsigned>
    MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
                    cl::desc("Maximum depth of recursive AddExpr"),
                    cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));
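
// Illustrative invocation (an assumption about typical use, not from this
// file): these options are passed to opt alongside the analysis itself, e.g.
//
//   opt -analyze -scalar-evolution input.ll \
//       -scalar-evolution-max-iterations=64 -verify-scev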
OpStr = " umax "; break; 217 case scSMaxExpr: OpStr = " smax "; break; 218 } 219 OS << "("; 220 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 221 I != E; ++I) { 222 OS << **I; 223 if (std::next(I) != E) 224 OS << OpStr; 225 } 226 OS << ")"; 227 switch (NAry->getSCEVType()) { 228 case scAddExpr: 229 case scMulExpr: 230 if (NAry->hasNoUnsignedWrap()) 231 OS << "<nuw>"; 232 if (NAry->hasNoSignedWrap()) 233 OS << "<nsw>"; 234 } 235 return; 236 } 237 case scUDivExpr: { 238 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); 239 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; 240 return; 241 } 242 case scUnknown: { 243 const SCEVUnknown *U = cast<SCEVUnknown>(this); 244 Type *AllocTy; 245 if (U->isSizeOf(AllocTy)) { 246 OS << "sizeof(" << *AllocTy << ")"; 247 return; 248 } 249 if (U->isAlignOf(AllocTy)) { 250 OS << "alignof(" << *AllocTy << ")"; 251 return; 252 } 253 254 Type *CTy; 255 Constant *FieldNo; 256 if (U->isOffsetOf(CTy, FieldNo)) { 257 OS << "offsetof(" << *CTy << ", "; 258 FieldNo->printAsOperand(OS, false); 259 OS << ")"; 260 return; 261 } 262 263 // Otherwise just print it normally. 264 U->getValue()->printAsOperand(OS, false); 265 return; 266 } 267 case scCouldNotCompute: 268 OS << "***COULDNOTCOMPUTE***"; 269 return; 270 } 271 llvm_unreachable("Unknown SCEV kind!"); 272 } 273 274 Type *SCEV::getType() const { 275 switch (static_cast<SCEVTypes>(getSCEVType())) { 276 case scConstant: 277 return cast<SCEVConstant>(this)->getType(); 278 case scTruncate: 279 case scZeroExtend: 280 case scSignExtend: 281 return cast<SCEVCastExpr>(this)->getType(); 282 case scAddRecExpr: 283 case scMulExpr: 284 case scUMaxExpr: 285 case scSMaxExpr: 286 return cast<SCEVNAryExpr>(this)->getType(); 287 case scAddExpr: 288 return cast<SCEVAddExpr>(this)->getType(); 289 case scUDivExpr: 290 return cast<SCEVUDivExpr>(this)->getType(); 291 case scUnknown: 292 return cast<SCEVUnknown>(this)->getType(); 293 case scCouldNotCompute: 294 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 295 } 296 llvm_unreachable("Unknown SCEV kind!"); 297 } 298 299 bool SCEV::isZero() const { 300 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 301 return SC->getValue()->isZero(); 302 return false; 303 } 304 305 bool SCEV::isOne() const { 306 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 307 return SC->getValue()->isOne(); 308 return false; 309 } 310 311 bool SCEV::isAllOnesValue() const { 312 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 313 return SC->getValue()->isAllOnesValue(); 314 return false; 315 } 316 317 bool SCEV::isNonConstantNegative() const { 318 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); 319 if (!Mul) return false; 320 321 // If there is a constant factor, it will be first. 322 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); 323 if (!SC) return false; 324 325 // Return true if the value is negative, this matches things like (-42 * V). 
Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
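
// Illustrative example (an added sketch; the IR below is assumed): isSizeOf
// matches the classic null-GEP idiom, where sizeof(T) is the byte offset of
// element one of a T array based at null:
//
//   ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
//
// prints as "sizeof(i32)" and folds to 4. isOffsetOf likewise matches GEPs of
// the form (null, 0, FieldNo) into a struct or array type.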
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // Compare addrec loop depths.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      unsigned LDepth = LLoop->getLoopDepth(), RDepth = RLoop->getLoopDepth();
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(),
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
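
// Illustrative consequence of this ordering (an added sketch): scConstant is
// the smallest SCEVType, so constants sort ahead of unknowns within an add or
// mul. Both x + 2 and 2 + x therefore canonicalize to the single expression
// printed as "(2 + %x)".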
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCache, LI, LHS, RHS) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
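
// Illustrative example (an added sketch): given Ops = [%x, 2, %x], the stable
// sort places the constant first and the duplicate %x operands adjacent,
// yielding [2, %x, %x], so callers can fold duplicates with one linear scan.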
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case when N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }
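
  // Illustrative example (an added sketch, with x and y standing for
  // SCEVUnknowns): dividing (x*y + 3) by x divides each add operand
  // separately:
  //   (x*y) / x  ->  quotient y, remainder 0
  //       3 / x  ->  quotient 0, remainder 3
  // so visitAddExpr produces the overall quotient y and remainder 3.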
  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

}
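
// Illustrative usage sketch (an addition; SE, the loop L, and the SCEVs are
// assumed to exist):
//
//   const SCEV *N = ...; // the addrec {0,+,8}<%L>
//   const SCEV *D = SE.getConstant(N->getType(), 4);
//   const SCEV *Q, *R;
//   SCEVDivision::divide(SE, N, D, &Q, &R);
//   // Q is {0,+,2}<%L> and R is 0: an affine addrec divides by dividing
//   // its start and step.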
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
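
  // Illustrative instance of the scheme above (an added sketch, not from the
  // original source): for K = 3 and W = 32, K! = 6 = 2^1 * 3, so T = 1 and
  // K!/2^T = 3. The product It*(It-1)*(It-2) is formed at W+T = 33 bits,
  // divided by 2^T (a right shift by 1), truncated to 32 bits, and multiplied
  // by the inverse of 3 modulo 2^32 (0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1
  // (mod 2^32)). For It = 5: 5*4*3 = 60, 60 >> 1 = 30, and
  // 30 * 0xAAAAAAAB == 10 (mod 2^32) == C(5,3).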
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
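
// Illustrative evaluation (an added sketch, not from the original source):
// for the chain {0,+,1,+,1} at It = 4 the formula above gives
//   0*BC(4,0) + 1*BC(4,1) + 1*BC(4,2) = 0 + 4 + 6 = 10,
// which matches stepping the recurrence four times: 0, 1, 3, 6, 10.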
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRange(Step).getUnsignedMax());
}
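
// Illustrative instance (an added sketch): for an i8 recurrence whose step is
// known to lie in [1, 4], getSignedOverflowLimitForStep returns the limit
// SignedMin - 4 == 124 with Pred == ICMP_SLT: while the recurrence value is
// known to be < 124 before the increment, adding at most 4 cannot exceed
// SignedMax == 127, so the increment cannot sign-overflow.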
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.
// This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
                     (SE->*GetExtendExpr)(Step, WideTy));
  if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
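
// Illustrative instance (an added sketch): for AR = {x + 4,+,4}, removing one
// occurrence of Step from the start expression gives PreStart = x. If one of
// the three checks above proves that x + 4 cannot wrap, then the extension of
// AR's start may be computed as (4 + ext(x)) instead of ext(x + 4), i.e. in
// terms of the narrower PreStart.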
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
                        (SE->*GetExtendExpr)(PreStart, Ty));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
            getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.
In the latter case, the analysis code will
1557 // cope with a conservative value, and it will take care to purge
1558 // that value once it has finished.
1559 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1560 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1561 // Manually compute the final value for AR, checking for
1562 // overflow.
1563 
1564 // Check whether the backedge-taken count can be losslessly cast to
1565 // the addrec's type. The count is always unsigned.
1566 const SCEV *CastedMaxBECount =
1567 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1568 const SCEV *RecastedMaxBECount =
1569 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1570 if (MaxBECount == RecastedMaxBECount) {
1571 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1572 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1573 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
1574 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
1575 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
1576 const SCEV *WideMaxBECount =
1577 getZeroExtendExpr(CastedMaxBECount, WideTy);
1578 const SCEV *OperandExtendedAdd =
1579 getAddExpr(WideStart,
1580 getMulExpr(WideMaxBECount,
1581 getZeroExtendExpr(Step, WideTy)));
1582 if (ZAdd == OperandExtendedAdd) {
1583 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1584 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1585 // Return the expression with the addrec on the outside.
1586 return getAddRecExpr(
1587 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1588 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1589 }
1590 // Similar to above, only this time treat the step value as signed.
1591 // This covers loops that count down.
1592 OperandExtendedAdd =
1593 getAddExpr(WideStart,
1594 getMulExpr(WideMaxBECount,
1595 getSignExtendExpr(Step, WideTy)));
1596 if (ZAdd == OperandExtendedAdd) {
1597 // Cache knowledge of AR NW, which is propagated to this AddRec.
1598 // Negative step causes unsigned wrap, but it still can't self-wrap.
1599 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1600 // Return the expression with the addrec on the outside.
1601 return getAddRecExpr(
1602 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
1603 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1604 }
1605 }
1606 }
1607 
1608 // Normally, in the cases we can prove no-overflow via a
1609 // backedge guarding condition, we can also compute a backedge
1610 // taken count for the loop. The exceptions are assumptions and
1611 // guards present in the loop -- SCEV is not great at exploiting
1612 // these to compute max backedge taken counts, but can still use
1613 // these to prove lack of overflow. Use this fact to avoid
1614 // doing extra work that may not pay off.
1615 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1616 !AC.assumptions().empty()) {
1617 // If the backedge is guarded by a comparison with the pre-inc
1618 // value the addrec is safe. Also, if the entry is guarded by
1619 // a comparison with the start value and the backedge is
1620 // guarded by a comparison with the post-inc value, the addrec
1621 // is safe.
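// E.g., for {Start,+,4} over i32, N below is (unsigned)0 - 4, i.e.
// UINT32_MAX - 3: if the backedge is only taken while AR is `ult` N, the
// increment of at most 4 can never wrap around zero, so the addrec is NUW.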
1622 if (isKnownPositive(Step)) { 1623 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1624 getUnsignedRange(Step).getUnsignedMax()); 1625 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1626 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1627 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1628 AR->getPostIncExpr(*this), N))) { 1629 // Cache knowledge of AR NUW, which is propagated to this 1630 // AddRec. 1631 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1632 // Return the expression with the addrec on the outside. 1633 return getAddRecExpr( 1634 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1635 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1636 } 1637 } else if (isKnownNegative(Step)) { 1638 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1639 getSignedRange(Step).getSignedMin()); 1640 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1641 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1642 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1643 AR->getPostIncExpr(*this), N))) { 1644 // Cache knowledge of AR NW, which is propagated to this 1645 // AddRec. Negative step causes unsigned wrap, but it 1646 // still can't self-wrap. 1647 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1648 // Return the expression with the addrec on the outside. 1649 return getAddRecExpr( 1650 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1651 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1652 } 1653 } 1654 } 1655 1656 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1657 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1658 return getAddRecExpr( 1659 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1660 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1661 } 1662 } 1663 1664 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1665 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1666 if (SA->hasNoUnsignedWrap()) { 1667 // If the addition does not unsign overflow then we can, by definition, 1668 // commute the zero extension with the addition operation. 1669 SmallVector<const SCEV *, 4> Ops; 1670 for (const auto *Op : SA->operands()) 1671 Ops.push_back(getZeroExtendExpr(Op, Ty)); 1672 return getAddExpr(Ops, SCEV::FlagNUW); 1673 } 1674 } 1675 1676 // The cast wasn't folded; create an explicit cast node. 1677 // Recompute the insert position, as it may have been invalidated. 1678 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1679 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1680 Op, Ty); 1681 UniqueSCEVs.InsertNode(S, IP); 1682 return S; 1683 } 1684 1685 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, 1686 Type *Ty) { 1687 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1688 "This is not an extending conversion!"); 1689 assert(isSCEVable(Ty) && 1690 "This is not a conversion to a SCEVable type!"); 1691 Ty = getEffectiveSCEVType(Ty); 1692 1693 // Fold if the operand is constant. 
1694 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1695 return getConstant( 1696 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1697 1698 // sext(sext(x)) --> sext(x) 1699 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1700 return getSignExtendExpr(SS->getOperand(), Ty); 1701 1702 // sext(zext(x)) --> zext(x) 1703 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1704 return getZeroExtendExpr(SZ->getOperand(), Ty); 1705 1706 // Before doing any expensive analysis, check to see if we've already 1707 // computed a SCEV for this Op and Ty. 1708 FoldingSetNodeID ID; 1709 ID.AddInteger(scSignExtend); 1710 ID.AddPointer(Op); 1711 ID.AddPointer(Ty); 1712 void *IP = nullptr; 1713 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1714 1715 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1716 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1717 // It's possible the bits taken off by the truncate were all sign bits. If 1718 // so, we should be able to simplify this further. 1719 const SCEV *X = ST->getOperand(); 1720 ConstantRange CR = getSignedRange(X); 1721 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1722 unsigned NewBits = getTypeSizeInBits(Ty); 1723 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1724 CR.sextOrTrunc(NewBits))) 1725 return getTruncateOrSignExtend(X, Ty); 1726 } 1727 1728 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2 1729 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1730 if (SA->getNumOperands() == 2) { 1731 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0)); 1732 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1)); 1733 if (SMul && SC1) { 1734 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) { 1735 const APInt &C1 = SC1->getAPInt(); 1736 const APInt &C2 = SC2->getAPInt(); 1737 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && 1738 C2.ugt(C1) && C2.isPowerOf2()) 1739 return getAddExpr(getSignExtendExpr(SC1, Ty), 1740 getSignExtendExpr(SMul, Ty)); 1741 } 1742 } 1743 } 1744 1745 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1746 if (SA->hasNoSignedWrap()) { 1747 // If the addition does not sign overflow then we can, by definition, 1748 // commute the sign extension with the addition operation. 1749 SmallVector<const SCEV *, 4> Ops; 1750 for (const auto *Op : SA->operands()) 1751 Ops.push_back(getSignExtendExpr(Op, Ty)); 1752 return getAddExpr(Ops, SCEV::FlagNSW); 1753 } 1754 } 1755 // If the input value is a chrec scev, and we can prove that the value 1756 // did not overflow the old, smaller, value, we can sign extend all of the 1757 // operands (often constants). This allows analysis of something like 1758 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1759 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1760 if (AR->isAffine()) { 1761 const SCEV *Start = AR->getStart(); 1762 const SCEV *Step = AR->getStepRecurrence(*this); 1763 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1764 const Loop *L = AR->getLoop(); 1765 1766 if (!AR->hasNoSignedWrap()) { 1767 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1768 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1769 } 1770 1771 // If we have special knowledge that this addrec won't overflow, 1772 // we don't need to do any further analysis. 
1773 if (AR->hasNoSignedWrap())
1774 return getAddRecExpr(
1775 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1776 getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
1777 
1778 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1779 // Note that this serves two purposes: It filters out loops that are
1780 // simply not analyzable, and it covers the case where this code is
1781 // being called from within backedge-taken count analysis, such that
1782 // attempting to ask for the backedge-taken count would likely result
1783 // in infinite recursion. In the latter case, the analysis code will
1784 // cope with a conservative value, and it will take care to purge
1785 // that value once it has finished.
1786 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1787 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1788 // Manually compute the final value for AR, checking for
1789 // overflow.
1790 
1791 // Check whether the backedge-taken count can be losslessly cast to
1792 // the addrec's type. The count is always unsigned.
1793 const SCEV *CastedMaxBECount =
1794 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1795 const SCEV *RecastedMaxBECount =
1796 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1797 if (MaxBECount == RecastedMaxBECount) {
1798 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1799 // Check whether Start+Step*MaxBECount has no signed overflow.
1800 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1801 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1802 const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1803 const SCEV *WideMaxBECount =
1804 getZeroExtendExpr(CastedMaxBECount, WideTy);
1805 const SCEV *OperandExtendedAdd =
1806 getAddExpr(WideStart,
1807 getMulExpr(WideMaxBECount,
1808 getSignExtendExpr(Step, WideTy)));
1809 if (SAdd == OperandExtendedAdd) {
1810 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1811 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1812 // Return the expression with the addrec on the outside.
1813 return getAddRecExpr(
1814 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1815 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1816 }
1817 // Similar to above, only this time treat the step value as unsigned.
1818 // This covers loops that count up with an unsigned step.
1819 OperandExtendedAdd =
1820 getAddExpr(WideStart,
1821 getMulExpr(WideMaxBECount,
1822 getZeroExtendExpr(Step, WideTy)));
1823 if (SAdd == OperandExtendedAdd) {
1824 // If AR wraps around then
1825 //
1826 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
1827 // => SAdd != OperandExtendedAdd
1828 //
1829 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
1830 // (SAdd == OperandExtendedAdd => AR is NW)
1831 
1832 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1833 
1834 // Return the expression with the addrec on the outside.
1835 return getAddRecExpr(
1836 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1837 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1838 }
1839 }
1840 }
1841 
1842 // Normally, in the cases we can prove no-overflow via a
1843 // backedge guarding condition, we can also compute a backedge
1844 // taken count for the loop. The exceptions are assumptions and
1845 // guards present in the loop -- SCEV is not great at exploiting
1846 // these to compute max backedge taken counts, but can still use
1847 // these to prove lack of overflow.
Use this fact to avoid 1848 // doing extra work that may not pay off. 1849 1850 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1851 !AC.assumptions().empty()) { 1852 // If the backedge is guarded by a comparison with the pre-inc 1853 // value the addrec is safe. Also, if the entry is guarded by 1854 // a comparison with the start value and the backedge is 1855 // guarded by a comparison with the post-inc value, the addrec 1856 // is safe. 1857 ICmpInst::Predicate Pred; 1858 const SCEV *OverflowLimit = 1859 getSignedOverflowLimitForStep(Step, &Pred, this); 1860 if (OverflowLimit && 1861 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1862 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1863 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1864 OverflowLimit)))) { 1865 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 1866 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1867 return getAddRecExpr( 1868 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), 1869 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1870 } 1871 } 1872 1873 // If Start and Step are constants, check if we can apply this 1874 // transformation: 1875 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2 1876 auto *SC1 = dyn_cast<SCEVConstant>(Start); 1877 auto *SC2 = dyn_cast<SCEVConstant>(Step); 1878 if (SC1 && SC2) { 1879 const APInt &C1 = SC1->getAPInt(); 1880 const APInt &C2 = SC2->getAPInt(); 1881 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && 1882 C2.isPowerOf2()) { 1883 Start = getSignExtendExpr(Start, Ty); 1884 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L, 1885 AR->getNoWrapFlags()); 1886 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty)); 1887 } 1888 } 1889 1890 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 1891 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1892 return getAddRecExpr( 1893 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), 1894 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1895 } 1896 } 1897 1898 // If the input value is provably positive and we could not simplify 1899 // away the sext build a zext instead. 1900 if (isKnownNonNegative(Op)) 1901 return getZeroExtendExpr(Op, Ty); 1902 1903 // The cast wasn't folded; create an explicit cast node. 1904 // Recompute the insert position, as it may have been invalidated. 1905 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1906 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1907 Op, Ty); 1908 UniqueSCEVs.InsertNode(S, IP); 1909 return S; 1910 } 1911 1912 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1913 /// unspecified bits out to the given type. 1914 /// 1915 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1916 Type *Ty) { 1917 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1918 "This is not an extending conversion!"); 1919 assert(isSCEVable(Ty) && 1920 "This is not a conversion to a SCEVable type!"); 1921 Ty = getEffectiveSCEVType(Ty); 1922 1923 // Sign-extend negative constants. 1924 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1925 if (SC->getAPInt().isNegative()) 1926 return getSignExtendExpr(Op, Ty); 1927 1928 // Peel off a truncate cast. 
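// anyext(trunc(x)) --> anyext(x), x, or trunc(x), depending on whether the
// destination type is wider than, equal to, or narrower than x's type.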
1929 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1930 const SCEV *NewOp = T->getOperand();
1931 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1932 return getAnyExtendExpr(NewOp, Ty);
1933 return getTruncateOrNoop(NewOp, Ty);
1934 }
1935 
1936 // Next try a zext cast. If the cast is folded, use it.
1937 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1938 if (!isa<SCEVZeroExtendExpr>(ZExt))
1939 return ZExt;
1940 
1941 // Next try a sext cast. If the cast is folded, use it.
1942 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1943 if (!isa<SCEVSignExtendExpr>(SExt))
1944 return SExt;
1945 
1946 // Force the cast to be folded into the operands of an addrec.
1947 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1948 SmallVector<const SCEV *, 4> Ops;
1949 for (const SCEV *Op : AR->operands())
1950 Ops.push_back(getAnyExtendExpr(Op, Ty));
1951 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1952 }
1953 
1954 // If the expression is obviously signed, use the sext cast value.
1955 if (isa<SCEVSMaxExpr>(Op))
1956 return SExt;
1957 
1958 // Absent any other information, use the zext cast value.
1959 return ZExt;
1960 }
1961 
1962 /// Process the given Ops list, which is a list of operands to be added under
1963 /// the given scale, and update the given map. This is a helper function for
1964 /// getAddExpr. As an example of what it does, given a sequence of operands
1965 /// that would form an add expression like this:
1966 ///
1967 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1968 ///
1969 /// where A and B are constants, update the map with these values:
1970 ///
1971 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1972 ///
1973 /// and add 13 + A*B*29 to AccumulatedConstant.
1974 /// This will allow getAddExpr to produce this:
1975 ///
1976 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1977 ///
1978 /// This form often exposes folding opportunities that are hidden in
1979 /// the original operand list.
1980 ///
1981 /// Return true iff it appears that any interesting folding opportunities
1982 /// may be exposed. This helps getAddExpr short-circuit extra work in
1983 /// the common case where no interesting opportunities are present, and
1984 /// is also used as a check to avoid infinite recursion.
1985 ///
1986 static bool
1987 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1988 SmallVectorImpl<const SCEV *> &NewOps,
1989 APInt &AccumulatedConstant,
1990 const SCEV *const *Ops, size_t NumOperands,
1991 const APInt &Scale,
1992 ScalarEvolution &SE) {
1993 bool Interesting = false;
1994 
1995 // Iterate over the add operands. They are sorted, with constants first.
1996 unsigned i = 0;
1997 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1998 ++i;
1999 // Pull a buried constant out to the outside.
2000 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2001 Interesting = true;
2002 AccumulatedConstant += Scale * C->getAPInt();
2003 }
2004 
2005 // Next comes everything else. We're especially interested in multiplies
2006 // here, but they're in the middle, so just visit the rest with one loop.
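// E.g., in the example above, visiting (A * (o + p + ...)) with Scale == 1
// recurses into the inner add with NewScale == A, which is how o and p end
// up recorded in the map with scale A.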
2007 for (; i != NumOperands; ++i) { 2008 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2009 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2010 APInt NewScale = 2011 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2012 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2013 // A multiplication of a constant with another add; recurse. 2014 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2015 Interesting |= 2016 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2017 Add->op_begin(), Add->getNumOperands(), 2018 NewScale, SE); 2019 } else { 2020 // A multiplication of a constant with some other value. Update 2021 // the map. 2022 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2023 const SCEV *Key = SE.getMulExpr(MulOps); 2024 auto Pair = M.insert({Key, NewScale}); 2025 if (Pair.second) { 2026 NewOps.push_back(Pair.first->first); 2027 } else { 2028 Pair.first->second += NewScale; 2029 // The map already had an entry for this value, which may indicate 2030 // a folding opportunity. 2031 Interesting = true; 2032 } 2033 } 2034 } else { 2035 // An ordinary operand. Update the map. 2036 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2037 M.insert({Ops[i], Scale}); 2038 if (Pair.second) { 2039 NewOps.push_back(Pair.first->first); 2040 } else { 2041 Pair.first->second += Scale; 2042 // The map already had an entry for this value, which may indicate 2043 // a folding opportunity. 2044 Interesting = true; 2045 } 2046 } 2047 } 2048 2049 return Interesting; 2050 } 2051 2052 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2053 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2054 // can't-overflow flags for the operation if possible. 2055 static SCEV::NoWrapFlags 2056 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2057 const SmallVectorImpl<const SCEV *> &Ops, 2058 SCEV::NoWrapFlags Flags) { 2059 using namespace std::placeholders; 2060 typedef OverflowingBinaryOperator OBO; 2061 2062 bool CanAnalyze = 2063 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2064 (void)CanAnalyze; 2065 assert(CanAnalyze && "don't call from other places!"); 2066 2067 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2068 SCEV::NoWrapFlags SignOrUnsignWrap = 2069 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2070 2071 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
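// (Rationale: when every operand is non-negative, the signed and unsigned
// interpretations of each operand agree, and an <nsw> result is bounded by
// the signed maximum, so the same operation cannot wrap unsigned either.)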
2072 auto IsKnownNonNegative = [&](const SCEV *S) { 2073 return SE->isKnownNonNegative(S); 2074 }; 2075 2076 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2077 Flags = 2078 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2079 2080 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2081 2082 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2083 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2084 2085 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2086 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2087 2088 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2089 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2090 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2091 Instruction::Add, C, OBO::NoSignedWrap); 2092 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2093 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2094 } 2095 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2096 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2097 Instruction::Add, C, OBO::NoUnsignedWrap); 2098 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2099 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2100 } 2101 } 2102 2103 return Flags; 2104 } 2105 2106 /// Get a canonical add expression, or something simpler if possible. 2107 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2108 SCEV::NoWrapFlags Flags, 2109 unsigned Depth) { 2110 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2111 "only nuw or nsw allowed"); 2112 assert(!Ops.empty() && "Cannot get empty add!"); 2113 if (Ops.size() == 1) return Ops[0]; 2114 #ifndef NDEBUG 2115 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2116 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2117 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2118 "SCEVAddExpr operand types don't match!"); 2119 #endif 2120 2121 // Sort by complexity, this groups all similar expression types together. 2122 GroupByComplexity(Ops, &LI); 2123 2124 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2125 2126 // If there are any constants, fold them together. 2127 unsigned Idx = 0; 2128 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2129 ++Idx; 2130 assert(Idx < Ops.size()); 2131 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2132 // We found two constants, fold them together! 2133 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2134 if (Ops.size() == 2) return Ops[0]; 2135 Ops.erase(Ops.begin()+1); // Erase the folded element 2136 LHSC = cast<SCEVConstant>(Ops[0]); 2137 } 2138 2139 // If we are left with a constant zero being added, strip it off. 2140 if (LHSC->getValue()->isZero()) { 2141 Ops.erase(Ops.begin()); 2142 --Idx; 2143 } 2144 2145 if (Ops.size() == 1) return Ops[0]; 2146 } 2147 2148 // Limit recursion calls depth 2149 if (Depth > MaxAddExprDepth) 2150 return getOrCreateAddExpr(Ops, Flags); 2151 2152 // Okay, check to see if the same value occurs in the operand list more than 2153 // once. If so, merge them together into an multiply expression. Since we 2154 // sorted the list, these values are required to be adjacent. 2155 Type *Ty = Ops[0]->getType(); 2156 bool FoundMatch = false; 2157 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2158 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2159 // Scan ahead to count how many equal operands there are. 
2160 unsigned Count = 2; 2161 while (i+Count != e && Ops[i+Count] == Ops[i]) 2162 ++Count; 2163 // Merge the values into a multiply. 2164 const SCEV *Scale = getConstant(Ty, Count); 2165 const SCEV *Mul = getMulExpr(Scale, Ops[i]); 2166 if (Ops.size() == Count) 2167 return Mul; 2168 Ops[i] = Mul; 2169 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2170 --i; e -= Count - 1; 2171 FoundMatch = true; 2172 } 2173 if (FoundMatch) 2174 return getAddExpr(Ops, Flags); 2175 2176 // Check for truncates. If all the operands are truncated from the same 2177 // type, see if factoring out the truncate would permit the result to be 2178 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 2179 // if the contents of the resulting outer trunc fold to something simple. 2180 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 2181 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 2182 Type *DstType = Trunc->getType(); 2183 Type *SrcType = Trunc->getOperand()->getType(); 2184 SmallVector<const SCEV *, 8> LargeOps; 2185 bool Ok = true; 2186 // Check all the operands to see if they can be represented in the 2187 // source type of the truncate. 2188 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2189 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2190 if (T->getOperand()->getType() != SrcType) { 2191 Ok = false; 2192 break; 2193 } 2194 LargeOps.push_back(T->getOperand()); 2195 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2196 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2197 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2198 SmallVector<const SCEV *, 8> LargeMulOps; 2199 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2200 if (const SCEVTruncateExpr *T = 2201 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2202 if (T->getOperand()->getType() != SrcType) { 2203 Ok = false; 2204 break; 2205 } 2206 LargeMulOps.push_back(T->getOperand()); 2207 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2208 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2209 } else { 2210 Ok = false; 2211 break; 2212 } 2213 } 2214 if (Ok) 2215 LargeOps.push_back(getMulExpr(LargeMulOps)); 2216 } else { 2217 Ok = false; 2218 break; 2219 } 2220 } 2221 if (Ok) { 2222 // Evaluate the expression in the larger type. 2223 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); 2224 // If it folds to something simple, use it. Otherwise, don't. 2225 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2226 return getTruncateExpr(Fold, DstType); 2227 } 2228 } 2229 2230 // Skip past any other cast SCEVs. 2231 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2232 ++Idx; 2233 2234 // If there are add operands they would be next. 2235 if (Idx < Ops.size()) { 2236 bool DeletedAdd = false; 2237 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2238 if (Ops.size() > AddOpsInlineThreshold || 2239 Add->getNumOperands() > AddOpsInlineThreshold) 2240 break; 2241 // If we have an add, expand the add operands onto the end of the operands 2242 // list. 2243 Ops.erase(Ops.begin()+Idx); 2244 Ops.append(Add->op_begin(), Add->op_end()); 2245 DeletedAdd = true; 2246 } 2247 2248 // If we deleted at least one add, we added operands to the end of the list, 2249 // and they are not necessarily sorted. Recurse to resort and resimplify 2250 // any operands we just acquired. 
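// E.g., inlining turns the operand list {x, (y + z)} into {x, y, z}, which
// may newly place equal or constant operands next to each other.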
2251 if (DeletedAdd)
2252 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2253 }
2254 
2255 // Skip over the add expression until we get to a multiply.
2256 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2257 ++Idx;
2258 
2259 // Check to see if there are any folding opportunities present with
2260 // operands multiplied by constant values.
2261 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2262 uint64_t BitWidth = getTypeSizeInBits(Ty);
2263 DenseMap<const SCEV *, APInt> M;
2264 SmallVector<const SCEV *, 8> NewOps;
2265 APInt AccumulatedConstant(BitWidth, 0);
2266 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2267 Ops.data(), Ops.size(),
2268 APInt(BitWidth, 1), *this)) {
2269 struct APIntCompare {
2270 bool operator()(const APInt &LHS, const APInt &RHS) const {
2271 return LHS.ult(RHS);
2272 }
2273 };
2274 
2275 // Some interesting folding opportunity is present, so it's worthwhile to
2276 // re-generate the operands list. Group the operands by constant scale,
2277 // to avoid multiplying by the same constant scale multiple times.
2278 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2279 for (const SCEV *NewOp : NewOps)
2280 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2281 // Re-generate the operands list.
2282 Ops.clear();
2283 if (AccumulatedConstant != 0)
2284 Ops.push_back(getConstant(AccumulatedConstant));
2285 for (auto &MulOp : MulOpLists)
2286 if (MulOp.first != 0)
2287 Ops.push_back(getMulExpr(
2288 getConstant(MulOp.first),
2289 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
2290 if (Ops.empty())
2291 return getZero(Ty);
2292 if (Ops.size() == 1)
2293 return Ops[0];
2294 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2295 }
2296 }
2297 
2298 // If we are adding something to a multiply expression, make sure the
2299 // something is not already an operand of the multiply. If so, merge it into
2300 // the multiply.
2301 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2302 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2303 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2304 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2305 if (isa<SCEVConstant>(MulOpSCEV))
2306 continue;
2307 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2308 if (MulOpSCEV == Ops[AddOp]) {
2309 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2310 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2311 if (Mul->getNumOperands() != 2) {
2312 // If the multiply has more than two operands, we must get the
2313 // Y*Z term.
2314 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2315 Mul->op_begin()+MulOp);
2316 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2317 InnerMul = getMulExpr(MulOps);
2318 }
2319 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2320 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2321 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
2322 if (Ops.size() == 2) return OuterMul;
2323 if (AddOp < Idx) {
2324 Ops.erase(Ops.begin()+AddOp);
2325 Ops.erase(Ops.begin()+Idx-1);
2326 } else {
2327 Ops.erase(Ops.begin()+Idx);
2328 Ops.erase(Ops.begin()+AddOp-1);
2329 }
2330 Ops.push_back(OuterMul);
2331 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2332 }
2333 
2334 // Check this multiply against other multiplies being added together.
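// E.g., the simplest instance of the fold below is
// X + (A*B) + (A*C) --> X + (A*(B+C)).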
2335 for (unsigned OtherMulIdx = Idx+1;
2336 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2337 ++OtherMulIdx) {
2338 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2339 // If MulOp occurs in OtherMul, we can fold the two multiplies
2340 // together.
2341 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2342 OMulOp != e; ++OMulOp)
2343 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2344 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2345 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2346 if (Mul->getNumOperands() != 2) {
2347 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2348 Mul->op_begin()+MulOp);
2349 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2350 InnerMul1 = getMulExpr(MulOps);
2351 }
2352 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2353 if (OtherMul->getNumOperands() != 2) {
2354 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2355 OtherMul->op_begin()+OMulOp);
2356 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2357 InnerMul2 = getMulExpr(MulOps);
2358 }
2359 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2360 const SCEV *InnerMulSum =
2361 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2362 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
2363 if (Ops.size() == 2) return OuterMul;
2364 Ops.erase(Ops.begin()+Idx);
2365 Ops.erase(Ops.begin()+OtherMulIdx-1);
2366 Ops.push_back(OuterMul);
2367 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2368 }
2369 }
2370 }
2371 }
2372 
2373 // If there are any add recurrences in the operands list, see if any other
2374 // added values are loop invariant. If so, we can fold them into the
2375 // recurrence.
2376 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2377 ++Idx;
2378 
2379 // Scan over all recurrences, trying to fold loop invariants into them.
2380 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2381 // Scan all of the other operands to this add and add them to the vector if
2382 // they are loop invariant w.r.t. the recurrence.
2383 SmallVector<const SCEV *, 8> LIOps;
2384 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2385 const Loop *AddRecLoop = AddRec->getLoop();
2386 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2387 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2388 LIOps.push_back(Ops[i]);
2389 Ops.erase(Ops.begin()+i);
2390 --i; --e;
2391 }
2392 
2393 // If we found some loop invariants, fold them into the recurrence.
2394 if (!LIOps.empty()) {
2395 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2396 LIOps.push_back(AddRec->getStart());
2397 
2398 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2399 AddRec->op_end());
2400 // This follows from the fact that the no-wrap flags on the outer add
2401 // expression are applicable on the 0th iteration, when the add recurrence
2402 // will be equal to its start value.
2403 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2404 
2405 // Build the new addrec. Propagate the NUW and NSW flags if both the
2406 // outer add and the inner addrec are guaranteed to have no overflow.
2407 // Always propagate NW.
2408 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2409 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2410 
2411 // If all of the other operands were loop invariant, we are done.
2412 if (Ops.size() == 1) return NewRec;
2413 
2414 // Otherwise, add the folded AddRec to the non-invariant parts.
2415 for (unsigned i = 0;; ++i)
2416 if (Ops[i] == AddRec) {
2417 Ops[i] = NewRec;
2418 break;
2419 }
2420 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2421 }
2422 
2423 // Okay, if there weren't any loop invariants to be folded, check to see if
2424 // there are multiple AddRec's with the same loop induction variable being
2425 // added together. If so, we can fold them.
2426 for (unsigned OtherIdx = Idx+1;
2427 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2428 ++OtherIdx)
2429 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2430 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2431 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2432 AddRec->op_end());
2433 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2434 ++OtherIdx)
2435 if (const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2436 if (OtherAddRec->getLoop() == AddRecLoop) {
2437 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2438 i != e; ++i) {
2439 if (i >= AddRecOps.size()) {
2440 AddRecOps.append(OtherAddRec->op_begin()+i,
2441 OtherAddRec->op_end());
2442 break;
2443 }
2444 SmallVector<const SCEV *, 2> TwoOps = {
2445 AddRecOps[i], OtherAddRec->getOperand(i)};
2446 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2447 }
2448 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2449 }
2450 // Step size has changed, so we cannot guarantee no self-wraparound.
2451 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2452 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2453 }
2454 
2455 // Otherwise couldn't fold anything into this recurrence. Move onto the
2456 // next one.
2457 }
2458 
2459 // Okay, it looks like we really DO need an add expr. Check to see if we
2460 // already have one, otherwise create a new one.
2461 return getOrCreateAddExpr(Ops, Flags);
2462 }
2463 
2464 const SCEV *
2465 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2466 SCEV::NoWrapFlags Flags) {
2467 FoldingSetNodeID ID;
2468 ID.AddInteger(scAddExpr);
2469 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2470 ID.AddPointer(Ops[i]);
2471 void *IP = nullptr;
2472 SCEVAddExpr *S =
2473 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2474 if (!S) {
2475 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2476 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2477 S = new (SCEVAllocator)
2478 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2479 UniqueSCEVs.InsertNode(S, IP);
2480 }
2481 S->setNoWrapFlags(Flags);
2482 return S;
2483 }
2484 
2485 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2486 uint64_t k = i*j;
2487 if (j > 1 && k / j != i) Overflow = true;
2488 return k;
2489 }
2490 
2491 /// Compute the result of "n choose k", the binomial coefficient. If an
2492 /// intermediate computation overflows, Overflow will be set and the return will
2493 /// be garbage. Overflow is not cleared on absence of overflow.
2494 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2495 // We use the multiplicative formula:
2496 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2497 // At each iteration i, we multiply by the i-th term of the numerator and
2498 // divide by the i-th term of the denominator. This division always yields an
2499 // integral result, and helps reduce the chance of overflow in the
2500 // intermediate computations. However, we can still overflow even when the
2501 // final result would fit.
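// E.g., Choose(5, 2): k stays 2, then r = (1*5)/1 = 5 and r = (5*4)/2 = 10,
// matching C(5,2) == 10. After each division r equals C(n, i), a binomial
// coefficient, which is why the running division stays exact.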
2502 2503 if (n == 0 || n == k) return 1; 2504 if (k > n) return 0; 2505 2506 if (k > n/2) 2507 k = n-k; 2508 2509 uint64_t r = 1; 2510 for (uint64_t i = 1; i <= k; ++i) { 2511 r = umul_ov(r, n-(i-1), Overflow); 2512 r /= i; 2513 } 2514 return r; 2515 } 2516 2517 /// Determine if any of the operands in this SCEV are a constant or if 2518 /// any of the add or multiply expressions in this SCEV contain a constant. 2519 static bool containsConstantSomewhere(const SCEV *StartExpr) { 2520 SmallVector<const SCEV *, 4> Ops; 2521 Ops.push_back(StartExpr); 2522 while (!Ops.empty()) { 2523 const SCEV *CurrentExpr = Ops.pop_back_val(); 2524 if (isa<SCEVConstant>(*CurrentExpr)) 2525 return true; 2526 2527 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) { 2528 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr); 2529 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); 2530 } 2531 } 2532 return false; 2533 } 2534 2535 /// Get a canonical multiply expression, or something simpler if possible. 2536 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2537 SCEV::NoWrapFlags Flags) { 2538 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2539 "only nuw or nsw allowed"); 2540 assert(!Ops.empty() && "Cannot get empty mul!"); 2541 if (Ops.size() == 1) return Ops[0]; 2542 #ifndef NDEBUG 2543 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2544 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2545 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2546 "SCEVMulExpr operand types don't match!"); 2547 #endif 2548 2549 // Sort by complexity, this groups all similar expression types together. 2550 GroupByComplexity(Ops, &LI); 2551 2552 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2553 2554 // If there are any constants, fold them together. 2555 unsigned Idx = 0; 2556 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2557 2558 // C1*(C2+V) -> C1*C2 + C1*V 2559 if (Ops.size() == 2) 2560 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2561 // If any of Add's ops are Adds or Muls with a constant, 2562 // apply this transformation as well. 2563 if (Add->getNumOperands() == 2) 2564 if (containsConstantSomewhere(Add)) 2565 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 2566 getMulExpr(LHSC, Add->getOperand(1))); 2567 2568 ++Idx; 2569 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2570 // We found two constants, fold them together! 2571 ConstantInt *Fold = 2572 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2573 Ops[0] = getConstant(Fold); 2574 Ops.erase(Ops.begin()+1); // Erase the folded element 2575 if (Ops.size() == 1) return Ops[0]; 2576 LHSC = cast<SCEVConstant>(Ops[0]); 2577 } 2578 2579 // If we are left with a constant one being multiplied, strip it off. 2580 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 2581 Ops.erase(Ops.begin()); 2582 --Idx; 2583 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2584 // If we have a multiply of zero, it will always be zero. 2585 return Ops[0]; 2586 } else if (Ops[0]->isAllOnesValue()) { 2587 // If we have a mul by -1 of an add, try distributing the -1 among the 2588 // add operands. 
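// E.g., (-1 * (x + 2)) --> (-1*x) + (-2); the constant term folds away, so
// the distribution is considered profitable (AnyFolded below).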
2589 if (Ops.size() == 2) { 2590 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2591 SmallVector<const SCEV *, 4> NewOps; 2592 bool AnyFolded = false; 2593 for (const SCEV *AddOp : Add->operands()) { 2594 const SCEV *Mul = getMulExpr(Ops[0], AddOp); 2595 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2596 NewOps.push_back(Mul); 2597 } 2598 if (AnyFolded) 2599 return getAddExpr(NewOps); 2600 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2601 // Negation preserves a recurrence's no self-wrap property. 2602 SmallVector<const SCEV *, 4> Operands; 2603 for (const SCEV *AddRecOp : AddRec->operands()) 2604 Operands.push_back(getMulExpr(Ops[0], AddRecOp)); 2605 2606 return getAddRecExpr(Operands, AddRec->getLoop(), 2607 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2608 } 2609 } 2610 } 2611 2612 if (Ops.size() == 1) 2613 return Ops[0]; 2614 } 2615 2616 // Skip over the add expression until we get to a multiply. 2617 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2618 ++Idx; 2619 2620 // If there are mul operands inline them all into this expression. 2621 if (Idx < Ops.size()) { 2622 bool DeletedMul = false; 2623 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2624 if (Ops.size() > MulOpsInlineThreshold) 2625 break; 2626 // If we have an mul, expand the mul operands onto the end of the operands 2627 // list. 2628 Ops.erase(Ops.begin()+Idx); 2629 Ops.append(Mul->op_begin(), Mul->op_end()); 2630 DeletedMul = true; 2631 } 2632 2633 // If we deleted at least one mul, we added operands to the end of the list, 2634 // and they are not necessarily sorted. Recurse to resort and resimplify 2635 // any operands we just acquired. 2636 if (DeletedMul) 2637 return getMulExpr(Ops); 2638 } 2639 2640 // If there are any add recurrences in the operands list, see if any other 2641 // added values are loop invariant. If so, we can fold them into the 2642 // recurrence. 2643 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2644 ++Idx; 2645 2646 // Scan over all recurrences, trying to fold loop invariants into them. 2647 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2648 // Scan all of the other operands to this mul and add them to the vector if 2649 // they are loop invariant w.r.t. the recurrence. 2650 SmallVector<const SCEV *, 8> LIOps; 2651 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2652 const Loop *AddRecLoop = AddRec->getLoop(); 2653 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2654 if (isLoopInvariant(Ops[i], AddRecLoop)) { 2655 LIOps.push_back(Ops[i]); 2656 Ops.erase(Ops.begin()+i); 2657 --i; --e; 2658 } 2659 2660 // If we found some loop invariants, fold them into the recurrence. 2661 if (!LIOps.empty()) { 2662 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2663 SmallVector<const SCEV *, 4> NewOps; 2664 NewOps.reserve(AddRec->getNumOperands()); 2665 const SCEV *Scale = getMulExpr(LIOps); 2666 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2667 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); 2668 2669 // Build the new addrec. Propagate the NUW and NSW flags if both the 2670 // outer mul and the inner addrec are guaranteed to have no overflow. 2671 // 2672 // No self-wrap cannot be guaranteed after changing the step size, but 2673 // will be inferred if either NUW or NSW is true. 
2674 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2675 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2676 2677 // If all of the other operands were loop invariant, we are done. 2678 if (Ops.size() == 1) return NewRec; 2679 2680 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2681 for (unsigned i = 0;; ++i) 2682 if (Ops[i] == AddRec) { 2683 Ops[i] = NewRec; 2684 break; 2685 } 2686 return getMulExpr(Ops); 2687 } 2688 2689 // Okay, if there weren't any loop invariants to be folded, check to see if 2690 // there are multiple AddRec's with the same loop induction variable being 2691 // multiplied together. If so, we can fold them. 2692 2693 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2694 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2695 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2696 // ]]],+,...up to x=2n}. 2697 // Note that the arguments to choose() are always integers with values 2698 // known at compile time, never SCEV objects. 2699 // 2700 // The implementation avoids pointless extra computations when the two 2701 // addrec's are of different length (mathematically, it's equivalent to 2702 // an infinite stream of zeros on the right). 2703 bool OpsModified = false; 2704 for (unsigned OtherIdx = Idx+1; 2705 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2706 ++OtherIdx) { 2707 const SCEVAddRecExpr *OtherAddRec = 2708 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2709 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2710 continue; 2711 2712 bool Overflow = false; 2713 Type *Ty = AddRec->getType(); 2714 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2715 SmallVector<const SCEV*, 7> AddRecOps; 2716 for (int x = 0, xe = AddRec->getNumOperands() + 2717 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2718 const SCEV *Term = getZero(Ty); 2719 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2720 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2721 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2722 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2723 z < ze && !Overflow; ++z) { 2724 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2725 uint64_t Coeff; 2726 if (LargerThan64Bits) 2727 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2728 else 2729 Coeff = Coeff1*Coeff2; 2730 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2731 const SCEV *Term1 = AddRec->getOperand(y-z); 2732 const SCEV *Term2 = OtherAddRec->getOperand(z); 2733 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2)); 2734 } 2735 } 2736 AddRecOps.push_back(Term); 2737 } 2738 if (!Overflow) { 2739 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), 2740 SCEV::FlagAnyWrap); 2741 if (Ops.size() == 2) return NewAddRec; 2742 Ops[Idx] = NewAddRec; 2743 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2744 OpsModified = true; 2745 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2746 if (!AddRec) 2747 break; 2748 } 2749 } 2750 if (OpsModified) 2751 return getMulExpr(Ops); 2752 2753 // Otherwise couldn't fold anything into this recurrence. Move onto the 2754 // next one. 2755 } 2756 2757 // Okay, it looks like we really DO need an mul expr. Check to see if we 2758 // already have one, otherwise create a new one. 
2759 FoldingSetNodeID ID;
2760 ID.AddInteger(scMulExpr);
2761 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2762 ID.AddPointer(Ops[i]);
2763 void *IP = nullptr;
2764 SCEVMulExpr *S =
2765 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2766 if (!S) {
2767 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2768 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2769 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2770 O, Ops.size());
2771 UniqueSCEVs.InsertNode(S, IP);
2772 }
2773 S->setNoWrapFlags(Flags);
2774 return S;
2775 }
2776 
2777 /// Get a canonical unsigned division expression, or something simpler if
2778 /// possible.
2779 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2780 const SCEV *RHS) {
2781 assert(getEffectiveSCEVType(LHS->getType()) ==
2782 getEffectiveSCEVType(RHS->getType()) &&
2783 "SCEVUDivExpr operand types don't match!");
2784 
2785 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2786 if (RHSC->getValue()->equalsInt(1))
2787 return LHS; // X udiv 1 --> x
2788 // If the denominator is zero, the result of the udiv is undefined. Don't
2789 // try to analyze it, because the resolution chosen here may differ from
2790 // the resolution chosen in other parts of the compiler.
2791 if (!RHSC->getValue()->isZero()) {
2792 // Determine if the division can be folded into the operands of
2793 // its left-hand side.
2794 // TODO: Generalize this to non-constants by using known-bits information.
2795 Type *Ty = LHS->getType();
2796 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
2797 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2798 // For non-power-of-two values, effectively round the value up to the
2799 // nearest power of two.
2800 if (!RHSC->getAPInt().isPowerOf2())
2801 ++MaxShiftAmt;
2802 IntegerType *ExtTy =
2803 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2804 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2805 if (const SCEVConstant *Step =
2806 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2807 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2808 const APInt &StepInt = Step->getAPInt();
2809 const APInt &DivInt = RHSC->getAPInt();
2810 if (!StepInt.urem(DivInt) &&
2811 getZeroExtendExpr(AR, ExtTy) ==
2812 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2813 getZeroExtendExpr(Step, ExtTy),
2814 AR->getLoop(), SCEV::FlagAnyWrap)) {
2815 SmallVector<const SCEV *, 4> Operands;
2816 for (const SCEV *Op : AR->operands())
2817 Operands.push_back(getUDivExpr(Op, RHS));
2818 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
2819 }
2820 // Get a canonical UDivExpr for a recurrence:
2821 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2822 // We can currently only fold X%N if X is constant.
2823 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2824 if (StartC && !DivInt.urem(StepInt) &&
2825 getZeroExtendExpr(AR, ExtTy) ==
2826 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2827 getZeroExtendExpr(Step, ExtTy),
2828 AR->getLoop(), SCEV::FlagAnyWrap)) {
2829 const APInt &StartInt = StartC->getAPInt();
2830 const APInt &StartRem = StartInt.urem(StepInt);
2831 if (StartRem != 0)
2832 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2833 AR->getLoop(), SCEV::FlagNW);
2834 }
2835 }
2836 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
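// E.g., (8 * x) /u 4 --> 2 * x, but only when zero-extending the product
// into ExtTy shows the multiply never wrapped; otherwise the low bits
// discarded by the udiv could change the result.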
2837 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 2838 SmallVector<const SCEV *, 4> Operands; 2839 for (const SCEV *Op : M->operands()) 2840 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2841 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 2842 // Find an operand that's safely divisible. 2843 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 2844 const SCEV *Op = M->getOperand(i); 2845 const SCEV *Div = getUDivExpr(Op, RHSC); 2846 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 2847 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 2848 M->op_end()); 2849 Operands[i] = Div; 2850 return getMulExpr(Operands); 2851 } 2852 } 2853 } 2854 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 2855 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 2856 SmallVector<const SCEV *, 4> Operands; 2857 for (const SCEV *Op : A->operands()) 2858 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2859 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 2860 Operands.clear(); 2861 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2862 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 2863 if (isa<SCEVUDivExpr>(Op) || 2864 getMulExpr(Op, RHS) != A->getOperand(i)) 2865 break; 2866 Operands.push_back(Op); 2867 } 2868 if (Operands.size() == A->getNumOperands()) 2869 return getAddExpr(Operands); 2870 } 2871 } 2872 2873 // Fold if both operands are constant. 2874 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 2875 Constant *LHSCV = LHSC->getValue(); 2876 Constant *RHSCV = RHSC->getValue(); 2877 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 2878 RHSCV))); 2879 } 2880 } 2881 } 2882 2883 FoldingSetNodeID ID; 2884 ID.AddInteger(scUDivExpr); 2885 ID.AddPointer(LHS); 2886 ID.AddPointer(RHS); 2887 void *IP = nullptr; 2888 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2889 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 2890 LHS, RHS); 2891 UniqueSCEVs.InsertNode(S, IP); 2892 return S; 2893 } 2894 2895 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 2896 APInt A = C1->getAPInt().abs(); 2897 APInt B = C2->getAPInt().abs(); 2898 uint32_t ABW = A.getBitWidth(); 2899 uint32_t BBW = B.getBitWidth(); 2900 2901 if (ABW > BBW) 2902 B = B.zext(ABW); 2903 else if (ABW < BBW) 2904 A = A.zext(BBW); 2905 2906 return APIntOps::GreatestCommonDivisor(A, B); 2907 } 2908 2909 /// Get a canonical unsigned division expression, or something simpler if 2910 /// possible. There is no representation for an exact udiv in SCEV IR, but we 2911 /// can attempt to remove factors from the LHS and RHS. We can't do this when 2912 /// it's not exact because the udiv may be clearing bits. 2913 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 2914 const SCEV *RHS) { 2915 // TODO: we could try to find factors in all sorts of things, but for now we 2916 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 2917 // end of this file for inspiration. 2918 2919 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 2920 if (!Mul || !Mul->hasNoUnsignedWrap()) 2921 return getUDivExpr(LHS, RHS); 2922 2923 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 2924 // If the mulexpr multiplies by a constant, then that constant must be the 2925 // first element of the mulexpr. 
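// E.g., (6 * x)<nuw> /u 3: gcd(6, 3) == 3 reduces the pair to (2 * x) /u 1,
// which then folds to 2 * x.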
2926 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2927 if (LHSCst == RHSCst) {
2928 SmallVector<const SCEV *, 2> Operands;
2929 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2930 return getMulExpr(Operands);
2931 }
2932 
2933 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2934 // that there's a factor provided by one of the other terms. We need to
2935 // check.
2936 APInt Factor = gcd(LHSCst, RHSCst);
2937 if (!Factor.isIntN(1)) {
2938 LHSCst =
2939 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
2940 RHSCst =
2941 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
2942 SmallVector<const SCEV *, 2> Operands;
2943 Operands.push_back(LHSCst);
2944 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2945 LHS = getMulExpr(Operands);
2946 RHS = RHSCst;
2947 Mul = dyn_cast<SCEVMulExpr>(LHS);
2948 if (!Mul)
2949 return getUDivExactExpr(LHS, RHS);
2950 }
2951 }
2952 }
2953 
2954 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2955 if (Mul->getOperand(i) == RHS) {
2956 SmallVector<const SCEV *, 2> Operands;
2957 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2958 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2959 return getMulExpr(Operands);
2960 }
2961 }
2962 
2963 return getUDivExpr(LHS, RHS);
2964 }
2965 
2966 /// Get an add recurrence expression for the specified loop. Simplify the
2967 /// expression as much as possible.
2968 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2969 const Loop *L,
2970 SCEV::NoWrapFlags Flags) {
2971 SmallVector<const SCEV *, 4> Operands;
2972 Operands.push_back(Start);
2973 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2974 if (StepChrec->getLoop() == L) {
2975 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2976 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2977 }
2978 
2979 Operands.push_back(Step);
2980 return getAddRecExpr(Operands, L, Flags);
2981 }
2982 
2983 /// Get an add recurrence expression for the specified loop. Simplify the
2984 /// expression as much as possible.
2985 const SCEV *
2986 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2987 const Loop *L, SCEV::NoWrapFlags Flags) {
2988 if (Operands.size() == 1) return Operands[0];
2989 #ifndef NDEBUG
2990 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2991 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2992 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2993 "SCEVAddRecExpr operand types don't match!");
2994 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2995 assert(isLoopInvariant(Operands[i], L) &&
2996 "SCEVAddRecExpr operand is not loop-invariant!");
2997 #endif
2998 
2999 if (Operands.back()->isZero()) {
3000 Operands.pop_back();
3001 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3002 }
3003 
3004 // It's tempting to want to call getMaxBackedgeTakenCount here and
3005 // use that information to infer NUW and NSW flags. However, computing a
3006 // BE count requires calling getAddRecExpr, so we may not yet have a
3007 // meaningful BE count at this point (and if we don't, we'd be stuck
3008 // with a SCEVCouldNotCompute as the cached BE count).
3009 
3010 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3011 
3012 // Canonicalize nested AddRecs by nesting them in order of loop depth.
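// E.g., {{A,+,B}<Inner>,+,C}<Outer> becomes {{A,+,C}<Outer>,+,B}<Inner>
// when Inner is nested inside Outer, so the outer loop's recurrence sits
// outermost. Flags survive only where both recurrences justify them.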
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do on CurTy is getting
  // its element type.
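  // Illustrative example (not part of the original source): for
  //   getelementptr {i32, i64}, {i32, i64}* %p, i64 1, i32 1
  // under a typical 64-bit DataLayout, the loop below adds one 16-byte
  // struct stride plus the 8-byte offset of field 1, yielding (%p + 24).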
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
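  // Illustrative example (not part of the original source): the loop below
  // flattens smax(smax(%a, %b), %c) into smax(%a, %b, %c), so the duplicate
  // and ordering folds that follow can see every operand at once.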
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X smax Y smax Y  -->  X smax Y
    // X smax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have a umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X umax Y umax Y  -->  X umax Y
    // X umax Y         -->  X, if X is always greater than or equal to Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y)
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
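/// Illustrative example (not part of the original source): for S = (5 + %x)
/// this returns {%x, i32 5}; for a bare %x, or for an add with more than two
/// operands or no leading constant, it returns {S, nullptr}.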
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S has already been inserted
    // into ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
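      // Illustrative example (not part of the original source): if
      // %y computes (3 + (4 * %x)), recording (4 * %x) -> {%y, 3} lets the
      // expander materialize (4 * %x) as (%y - 3) instead of emitting fresh
      // arithmetic for it.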
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRange(RHS).getSignedMin().isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
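      // Illustrative restatement (not part of the original source): since PN
      // itself is pre-inserted into Visited and never revisited, the test
      // below keeps an entry only when I is a PHI mapped to a SCEVUnknown
      // other than SymName; everything else is erased and recomputed later.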
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only allow AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow unknowns that are invariant in this loop.
    if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }
  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};
} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  typedef OverflowingBinaryOperator OBO;
  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {
/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW;
  bool IsNUW;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        IsNSW(false), IsNUW(false), Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
        Op(nullptr) {}
};
} // end anonymous namespace


/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction step.
      if (RHSC->getValue().isSignBit())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
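      // Illustrative example (not part of the original source): for i32,
      // (lshr %x, 3) is mapped below to the BinaryOp (%x /u 8), since
      // 1 << 3 == 8 and the shift count 3 is less than the bit width.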
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow: {
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      }

      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                        CI->getArgOperand(1));

      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
                        CI->getArgOperand(1));
      default:
        break;
      }
  }

  default:
    break;
  }

  return None;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (BEValueV && StartValueV) {
    // While we are analyzing this PHI node, handle its value symbolically.
    const SCEV *SymbolicName = getUnknown(PN);
    assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
           "PHI node already processed?");
    ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

    // Using this symbolic name for the PHI, analyze the value coming around
    // the back-edge.
    const SCEV *BEValue = getSCEV(BEValueV);

    // NOTE: If BEValue is loop invariant, we know that the PHI node just
    // has a special value for the first iteration of the loop.

    // If the value coming around the backedge is an add with the symbolic
    // value we just inserted, then we found a simple induction variable!
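    // Illustrative example (not part of the original source): for
    //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    //   %i.next = add nsw i32 %i, 1
    // BEValue is (SymbolicName + 1), so the code below finds Accum = 1 and
    // the PHI becomes the recurrence {0,+,1}<nsw><%loop>.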
    if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
      // If there is a single occurrence of the symbolic value, replace it
      // with a recurrence.
      unsigned FoundIndex = Add->getNumOperands();
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (Add->getOperand(i) == SymbolicName)
          if (FoundIndex == e) {
            FoundIndex = i;
            break;
          }

      if (FoundIndex != Add->getNumOperands()) {
        // Create an add with everything but the specified operand.
        SmallVector<const SCEV *, 8> Ops;
        for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
          if (i != FoundIndex)
            Ops.push_back(Add->getOperand(i));
        const SCEV *Accum = getAddExpr(Ops);

        // This is not a valid addrec if the step amount is varying each
        // loop iteration, but is not itself an addrec in this loop.
        if (isLoopInvariant(Accum, L) ||
            (isa<SCEVAddRecExpr>(Accum) &&
             cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

          if (auto BO = MatchBinaryOp(BEValueV, DT)) {
            if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
              if (BO->IsNUW)
                Flags = setFlags(Flags, SCEV::FlagNUW);
              if (BO->IsNSW)
                Flags = setFlags(Flags, SCEV::FlagNSW);
            }
          } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
            // If the increment is an inbounds GEP, then we know the address
            // space cannot be wrapped around. We cannot make any guarantee
            // about signed or unsigned overflow because pointers are
            // unsigned but we may have a negative index from the base
            // pointer. We can guarantee that no unsigned wrap occurs if the
            // indices form a positive value.
            if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
              Flags = setFlags(Flags, SCEV::FlagNW);

              const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
              if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
                Flags = setFlags(Flags, SCEV::FlagNUW);
            }

            // We cannot transfer nuw and nsw flags from subtraction
            // operations -- sub nuw X, Y is not the same as add nuw X, -Y
            // for instance.
          }

          const SCEV *StartVal = getSCEV(StartValueV);
          const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

          // Okay, for the entire analysis of this edge we assumed the PHI
          // to be symbolic. We now need to go back and purge all of the
          // entries for the scalars that use the symbolic expression.
          forgetSymbolicName(PN, SymbolicName);
          ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

          // We can add Flags to the post-inc expression only if we
          // know that it is *undefined behavior* for BEValueV to
          // overflow.
          if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
            if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
              (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

          return PHISCEV;
        }
      }
    } else {
      // Otherwise, this could be a loop like this:
      //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
      // In this case, j = {1,+,1} and BEValue is j.
      // Because the other in-value of i (0) fits the evolution of BEValue,
      // i really is an addrec evolution.
      //
      // We can generalize this by saying that i is the shifted value of
      // BEValue by one iteration:
      //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
      const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
      const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
      if (Shifted != getCouldNotCompute() &&
          Start != getCouldNotCompute()) {
        const SCEV *StartVal = getSCEV(StartValueV);
        if (Start == StartVal) {
          // Okay, for the entire analysis of this edge we assumed the PHI
          // to be symbolic. We now need to go back and purge all of the
          // entries for the scalars that use the symbolic expression.
          forgetSymbolicName(PN, SymbolicName);
          ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
          return Shifted;
        }
      }
    }

    // Remove the temporary PHI node SCEV that has been inserted while intending
    // to create an AddRecExpr for this PHI node. We cannot keep this temporary
    // as it will prevent later (possibly simpler) SCEV expressions from being
    // added to the ValueExprMap.
    eraseValueFromMap(PN);
  }

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
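    // Illustrative example (not part of the original source): assuming
    // nothing is known about %a and %b, smax(8 * %a, 4 * %b) yields
    // min(3, 2) == 2 guaranteed trailing zero bits.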
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC,
                     nullptr, &DT);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
ConstantRange
ScalarEvolution::getRange(const SCEV *S,
                          ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
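  // Illustrative example (not part of the original source): with an 8-bit
  // type and two known trailing zeros, the unsigned clamp below becomes
  // [0, 253), since 255 lshr 2 shl 2 == 252 and the upper bound is exclusive.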
4549 uint32_t TZ = GetMinTrailingZeros(S); 4550 if (TZ != 0) { 4551 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 4552 ConservativeResult = 4553 ConstantRange(APInt::getMinValue(BitWidth), 4554 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 4555 else 4556 ConservativeResult = ConstantRange( 4557 APInt::getSignedMinValue(BitWidth), 4558 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 4559 } 4560 4561 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 4562 ConstantRange X = getRange(Add->getOperand(0), SignHint); 4563 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 4564 X = X.add(getRange(Add->getOperand(i), SignHint)); 4565 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 4566 } 4567 4568 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 4569 ConstantRange X = getRange(Mul->getOperand(0), SignHint); 4570 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 4571 X = X.multiply(getRange(Mul->getOperand(i), SignHint)); 4572 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 4573 } 4574 4575 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 4576 ConstantRange X = getRange(SMax->getOperand(0), SignHint); 4577 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 4578 X = X.smax(getRange(SMax->getOperand(i), SignHint)); 4579 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 4580 } 4581 4582 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 4583 ConstantRange X = getRange(UMax->getOperand(0), SignHint); 4584 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 4585 X = X.umax(getRange(UMax->getOperand(i), SignHint)); 4586 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 4587 } 4588 4589 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 4590 ConstantRange X = getRange(UDiv->getLHS(), SignHint); 4591 ConstantRange Y = getRange(UDiv->getRHS(), SignHint); 4592 return setRange(UDiv, SignHint, 4593 ConservativeResult.intersectWith(X.udiv(Y))); 4594 } 4595 4596 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 4597 ConstantRange X = getRange(ZExt->getOperand(), SignHint); 4598 return setRange(ZExt, SignHint, 4599 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 4600 } 4601 4602 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 4603 ConstantRange X = getRange(SExt->getOperand(), SignHint); 4604 return setRange(SExt, SignHint, 4605 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 4606 } 4607 4608 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 4609 ConstantRange X = getRange(Trunc->getOperand(), SignHint); 4610 return setRange(Trunc, SignHint, 4611 ConservativeResult.intersectWith(X.truncate(BitWidth))); 4612 } 4613 4614 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 4615 // If there's no unsigned wrap, the value will never be less than its 4616 // initial value. 4617 if (AddRec->hasNoUnsignedWrap()) 4618 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 4619 if (!C->getValue()->isZero()) 4620 ConservativeResult = ConservativeResult.intersectWith( 4621 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 4622 4623 // If there's no signed wrap, and all the operands have the same sign or 4624 // zero, the value won't ever change sign. 
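  // Illustrative example: {1,+,2}<nsw> has only non-negative operands, so its
  // value can never turn negative, and the range below is intersected with
  // [0, signed min), i.e. the non-negative half of the signed space.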
4625 if (AddRec->hasNoSignedWrap()) { 4626 bool AllNonNeg = true; 4627 bool AllNonPos = true; 4628 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4629 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 4630 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 4631 } 4632 if (AllNonNeg) 4633 ConservativeResult = ConservativeResult.intersectWith( 4634 ConstantRange(APInt(BitWidth, 0), 4635 APInt::getSignedMinValue(BitWidth))); 4636 else if (AllNonPos) 4637 ConservativeResult = ConservativeResult.intersectWith( 4638 ConstantRange(APInt::getSignedMinValue(BitWidth), 4639 APInt(BitWidth, 1))); 4640 } 4641 4642 // TODO: non-affine addrec 4643 if (AddRec->isAffine()) { 4644 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 4645 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 4646 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 4647 auto RangeFromAffine = getRangeForAffineAR( 4648 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4649 BitWidth); 4650 if (!RangeFromAffine.isFullSet()) 4651 ConservativeResult = 4652 ConservativeResult.intersectWith(RangeFromAffine); 4653 4654 auto RangeFromFactoring = getRangeViaFactoring( 4655 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4656 BitWidth); 4657 if (!RangeFromFactoring.isFullSet()) 4658 ConservativeResult = 4659 ConservativeResult.intersectWith(RangeFromFactoring); 4660 } 4661 } 4662 4663 return setRange(AddRec, SignHint, ConservativeResult); 4664 } 4665 4666 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4667 // Check if the IR explicitly contains !range metadata. 4668 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 4669 if (MDRange.hasValue()) 4670 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 4671 4672 // Split here to avoid paying the compile-time cost of calling both 4673 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 4674 // if needed. 4675 const DataLayout &DL = getDataLayout(); 4676 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 4677 // For a SCEVUnknown, ask ValueTracking. 4678 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 4679 computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT); 4680 if (Ones != ~Zeros + 1) 4681 ConservativeResult = 4682 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); 4683 } else { 4684 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 4685 "generalize as needed!"); 4686 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 4687 if (NS > 1) 4688 ConservativeResult = ConservativeResult.intersectWith( 4689 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 4690 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 4691 } 4692 4693 return setRange(U, SignHint, ConservativeResult); 4694 } 4695 4696 return setRange(S, SignHint, ConservativeResult); 4697 } 4698 4699 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 4700 const SCEV *Step, 4701 const SCEV *MaxBECount, 4702 unsigned BitWidth) { 4703 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 4704 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 4705 "Precondition!"); 4706 4707 ConstantRange Result(BitWidth, /* isFullSet = */ true); 4708 4709 // Check for overflow. This must be done with ConstantRange arithmetic 4710 // because we could be called from within the ScalarEvolution overflow 4711 // checking code. 
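  // Sketch of the check below (illustrative): recompute
  // zext(Start) + zext(MaxBECount) * sext(Step) in a 2*BitWidth-wide range and
  // compare it against the extension of the narrow result; if they match, the
  // narrow computation cannot have wrapped, so the min/max envelope of Start
  // and End bounds the recurrence.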
4712 4713 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 4714 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 4715 ConstantRange ZExtMaxBECountRange = MaxBECountRange.zextOrTrunc(BitWidth * 2); 4716 4717 ConstantRange StepSRange = getSignedRange(Step); 4718 ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2); 4719 4720 ConstantRange StartURange = getUnsignedRange(Start); 4721 ConstantRange EndURange = 4722 StartURange.add(MaxBECountRange.multiply(StepSRange)); 4723 4724 // Check for unsigned overflow. 4725 ConstantRange ZExtStartURange = StartURange.zextOrTrunc(BitWidth * 2); 4726 ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2); 4727 if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4728 ZExtEndURange) { 4729 APInt Min = APIntOps::umin(StartURange.getUnsignedMin(), 4730 EndURange.getUnsignedMin()); 4731 APInt Max = APIntOps::umax(StartURange.getUnsignedMax(), 4732 EndURange.getUnsignedMax()); 4733 bool IsFullRange = Min.isMinValue() && Max.isMaxValue(); 4734 if (!IsFullRange) 4735 Result = 4736 Result.intersectWith(ConstantRange(Min, Max + 1)); 4737 } 4738 4739 ConstantRange StartSRange = getSignedRange(Start); 4740 ConstantRange EndSRange = 4741 StartSRange.add(MaxBECountRange.multiply(StepSRange)); 4742 4743 // Check for signed overflow. This must be done with ConstantRange 4744 // arithmetic because we could be called from within the ScalarEvolution 4745 // overflow checking code. 4746 ConstantRange SExtStartSRange = StartSRange.sextOrTrunc(BitWidth * 2); 4747 ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2); 4748 if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4749 SExtEndSRange) { 4750 APInt Min = 4751 APIntOps::smin(StartSRange.getSignedMin(), EndSRange.getSignedMin()); 4752 APInt Max = 4753 APIntOps::smax(StartSRange.getSignedMax(), EndSRange.getSignedMax()); 4754 bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue(); 4755 if (!IsFullRange) 4756 Result = 4757 Result.intersectWith(ConstantRange(Min, Max + 1)); 4758 } 4759 4760 return Result; 4761 } 4762 4763 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 4764 const SCEV *Step, 4765 const SCEV *MaxBECount, 4766 unsigned BitWidth) { 4767 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 4768 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 4769 4770 struct SelectPattern { 4771 Value *Condition = nullptr; 4772 APInt TrueValue; 4773 APInt FalseValue; 4774 4775 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 4776 const SCEV *S) { 4777 Optional<unsigned> CastOp; 4778 APInt Offset(BitWidth, 0); 4779 4780 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 4781 "Should be!"); 4782 4783 // Peel off a constant offset: 4784 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 4785 // In the future we could consider being smarter here and handle 4786 // {Start+Step,+,Step} too. 
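      // Illustrative example: S = (5 + %sel) is split into Offset = 5 and
      // S = %sel, so a select hidden behind a constant addend is still
      // recognized.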
4787 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 4788 return; 4789 4790 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 4791 S = SA->getOperand(1); 4792 } 4793 4794 // Peel off a cast operation 4795 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 4796 CastOp = SCast->getSCEVType(); 4797 S = SCast->getOperand(); 4798 } 4799 4800 using namespace llvm::PatternMatch; 4801 4802 auto *SU = dyn_cast<SCEVUnknown>(S); 4803 const APInt *TrueVal, *FalseVal; 4804 if (!SU || 4805 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 4806 m_APInt(FalseVal)))) { 4807 Condition = nullptr; 4808 return; 4809 } 4810 4811 TrueValue = *TrueVal; 4812 FalseValue = *FalseVal; 4813 4814 // Re-apply the cast we peeled off earlier 4815 if (CastOp.hasValue()) 4816 switch (*CastOp) { 4817 default: 4818 llvm_unreachable("Unknown SCEV cast type!"); 4819 4820 case scTruncate: 4821 TrueValue = TrueValue.trunc(BitWidth); 4822 FalseValue = FalseValue.trunc(BitWidth); 4823 break; 4824 case scZeroExtend: 4825 TrueValue = TrueValue.zext(BitWidth); 4826 FalseValue = FalseValue.zext(BitWidth); 4827 break; 4828 case scSignExtend: 4829 TrueValue = TrueValue.sext(BitWidth); 4830 FalseValue = FalseValue.sext(BitWidth); 4831 break; 4832 } 4833 4834 // Re-apply the constant offset we peeled off earlier 4835 TrueValue += Offset; 4836 FalseValue += Offset; 4837 } 4838 4839 bool isRecognized() { return Condition != nullptr; } 4840 }; 4841 4842 SelectPattern StartPattern(*this, BitWidth, Start); 4843 if (!StartPattern.isRecognized()) 4844 return ConstantRange(BitWidth, /* isFullSet = */ true); 4845 4846 SelectPattern StepPattern(*this, BitWidth, Step); 4847 if (!StepPattern.isRecognized()) 4848 return ConstantRange(BitWidth, /* isFullSet = */ true); 4849 4850 if (StartPattern.Condition != StepPattern.Condition) { 4851 // We don't handle this case today; but we could, by considering four 4852 // possibilities below instead of two. I'm not sure if there are cases where 4853 // that will help over what getRange already does, though. 4854 return ConstantRange(BitWidth, /* isFullSet = */ true); 4855 } 4856 4857 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 4858 // construct arbitrary general SCEV expressions here. This function is called 4859 // from deep in the call stack, and calling getSCEV (on a sext instruction, 4860 // say) can end up caching a suboptimal value. 4861 4862 // FIXME: without the explicit `this` receiver below, MSVC errors out with 4863 // C2352 and C2512 (otherwise it isn't needed). 4864 4865 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 4866 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 4867 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 4868 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 4869 4870 ConstantRange TrueRange = 4871 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 4872 ConstantRange FalseRange = 4873 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 4874 4875 return TrueRange.unionWith(FalseRange); 4876 } 4877 4878 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 4879 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 4880 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 4881 4882 // Return early if there are no flags to propagate to the SCEV. 
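  // Illustrative example: for "add nsw i32 %a, %b" this would return
  // SCEV::FlagNSW, but only after isSCEVExprNeverPoison (below) proves it is
  // safe to apply the flag to every instruction mapping to the same SCEV.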
4883   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4884   if (BinOp->hasNoUnsignedWrap())
4885     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
4886   if (BinOp->hasNoSignedWrap())
4887     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
4888   if (Flags == SCEV::FlagAnyWrap)
4889     return SCEV::FlagAnyWrap;
4890
4891   return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
4892 }
4893
4894 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
4895   // Here we check that I is in the header of the innermost loop containing I,
4896   // since we only deal with instructions in the loop header. The actual loop we
4897   // need to check later will come from an add recurrence, but getting that
4898   // requires computing the SCEV of the operands, which can be expensive. This
4899   // check is cheap and lets us rule out some cases early.
4900   Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
4901   if (InnermostContainingLoop == nullptr ||
4902       InnermostContainingLoop->getHeader() != I->getParent())
4903     return false;
4904
4905   // Only proceed if we can prove that I does not yield poison.
4906   if (!isKnownNotFullPoison(I)) return false;
4907
4908   // At this point we know that if I is executed, then it does not wrap
4909   // according to at least one of NSW or NUW. If I is not executed, then we do
4910   // not know if the calculation that I represents would wrap. Multiple
4911   // instructions can map to the same SCEV. If we apply NSW or NUW from I to
4912   // the SCEV, we must guarantee no wrapping for that SCEV also when it is
4913   // derived from other instructions that map to the same SCEV. We cannot make
4914   // that guarantee for cases where I is not executed. So we need to find the
4915   // loop that I is considered in relation to and prove that I is executed for
4916   // every iteration of that loop. That implies that the value that I
4917   // calculates does not wrap anywhere in the loop, and we can then apply the
4918   // flags to the SCEV.
4919   //
4920   // We check isLoopInvariant to disambiguate in case we are adding recurrences
4921   // from different loops, so that we know which loop to prove that I is
4922   // executed in.
4923   for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
4924     // I could be an extractvalue from a call to an overflow intrinsic.
4925     // TODO: We can do better here in some cases.
4926     if (!isSCEVable(I->getOperand(OpIndex)->getType()))
4927       return false;
4928     const SCEV *Op = getSCEV(I->getOperand(OpIndex));
4929     if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
4930       bool AllOtherOpsLoopInvariant = true;
4931       for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
4932            ++OtherOpIndex) {
4933         if (OtherOpIndex != OpIndex) {
4934           const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
4935           if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
4936             AllOtherOpsLoopInvariant = false;
4937             break;
4938           }
4939         }
4940       }
4941       if (AllOtherOpsLoopInvariant &&
4942           isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
4943         return true;
4944     }
4945   }
4946   return false;
4947 }
4948
4949 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
4950   // If we know that \c I can never be poison, period, then that's enough.
4951   if (isSCEVExprNeverPoison(I))
4952     return true;
4953
4954   // For an add recurrence specifically, we assume that infinite loops without
4955   // side effects are undefined behavior, and then reason as follows:
4956   //
4957   // If the add recurrence is poison in any iteration, it is poison on all
4958   // future iterations (since incrementing poison yields poison). If the result
4959   // of the add recurrence is fed into the loop latch condition and the loop
4960   // does not contain any throws or exiting blocks other than the latch, we now
4961   // have the ability to "choose" whether the backedge is taken or not (by
4962   // choosing a sufficiently evil value for the poison feeding into the branch)
4963   // for every iteration including and after the one in which \p I first became
4964   // poison. There are two possibilities (let K be the iteration in which \p I
4965   // first became poison):
4966   //
4967   // 1. In the set of iterations including and after K, the loop body executes
4968   //    no side effects. In this case executing the backedge an infinite number
4969   //    of times will yield undefined behavior.
4970   //
4971   // 2. In the set of iterations including and after K, the loop body executes
4972   //    at least one side effect. In this case, that specific instance of side
4973   //    effect is control dependent on poison, which also yields undefined
4974   //    behavior.
4975
4976   auto *ExitingBB = L->getExitingBlock();
4977   auto *LatchBB = L->getLoopLatch();
4978   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
4979     return false;
4980
4981   SmallPtrSet<const Instruction *, 16> Pushed;
4982   SmallVector<const Instruction *, 8> PoisonStack;
4983
4984   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
4985   // things that are known to be fully poison under that assumption go on the
4986   // PoisonStack.
4987   Pushed.insert(I);
4988   PoisonStack.push_back(I);
4989
4990   bool LatchControlDependentOnPoison = false;
4991   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
4992     const Instruction *Poison = PoisonStack.pop_back_val();
4993
4994     for (auto *PoisonUser : Poison->users()) {
4995       if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
4996         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
4997           PoisonStack.push_back(cast<Instruction>(PoisonUser));
4998       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
4999         assert(BI->isConditional() && "Only possibility!");
5000         if (BI->getParent() == LatchBB) {
5001           LatchControlDependentOnPoison = true;
5002           break;
5003         }
5004       }
5005     }
5006   }
5007
5008   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
5009 }
5010
5011 ScalarEvolution::LoopProperties
5012 ScalarEvolution::getLoopProperties(const Loop *L) {
5013   typedef ScalarEvolution::LoopProperties LoopProperties;
5014
5015   auto Itr = LoopPropertiesCache.find(L);
5016   if (Itr == LoopPropertiesCache.end()) {
5017     auto HasSideEffects = [](Instruction *I) {
5018       if (auto *SI = dyn_cast<StoreInst>(I))
5019         return !SI->isSimple();
5020
5021       return I->mayHaveSideEffects();
5022     };
5023
5024     LoopProperties LP = {/* HasNoAbnormalExits */ true,
5025                          /* HasNoSideEffects */ true};
5026
5027     for (auto *BB : L->getBlocks())
5028       for (auto &I : *BB) {
5029         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5030           LP.HasNoAbnormalExits = false;
5031         if (HasSideEffects(&I))
5032           LP.HasNoSideEffects = false;
5033         if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
5034           break; // We're already as pessimistic as we can get.
5035 } 5036 5037 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5038 assert(InsertPair.second && "We just checked!"); 5039 Itr = InsertPair.first; 5040 } 5041 5042 return Itr->second; 5043 } 5044 5045 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5046 if (!isSCEVable(V->getType())) 5047 return getUnknown(V); 5048 5049 if (Instruction *I = dyn_cast<Instruction>(V)) { 5050 // Don't attempt to analyze instructions in blocks that aren't 5051 // reachable. Such instructions don't matter, and they aren't required 5052 // to obey basic rules for definitions dominating uses which this 5053 // analysis depends on. 5054 if (!DT.isReachableFromEntry(I->getParent())) 5055 return getUnknown(V); 5056 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5057 return getConstant(CI); 5058 else if (isa<ConstantPointerNull>(V)) 5059 return getZero(V->getType()); 5060 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5061 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5062 else if (!isa<ConstantExpr>(V)) 5063 return getUnknown(V); 5064 5065 Operator *U = cast<Operator>(V); 5066 if (auto BO = MatchBinaryOp(U, DT)) { 5067 switch (BO->Opcode) { 5068 case Instruction::Add: { 5069 // The simple thing to do would be to just call getSCEV on both operands 5070 // and call getAddExpr with the result. However if we're looking at a 5071 // bunch of things all added together, this can be quite inefficient, 5072 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5073 // Instead, gather up all the operands and make a single getAddExpr call. 5074 // LLVM IR canonical form means we need only traverse the left operands. 5075 SmallVector<const SCEV *, 4> AddOps; 5076 do { 5077 if (BO->Op) { 5078 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5079 AddOps.push_back(OpSCEV); 5080 break; 5081 } 5082 5083 // If a NUW or NSW flag can be applied to the SCEV for this 5084 // addition, then compute the SCEV for this addition by itself 5085 // with a separate call to getAddExpr. We need to do that 5086 // instead of pushing the operands of the addition onto AddOps, 5087 // since the flags are only known to apply to this particular 5088 // addition - they may not apply to other additions that can be 5089 // formed with operands from AddOps. 
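          // Illustrative example: in ((a +nsw b) + c) the nsw flag is only
          // known for the inner addition, so we emit getAddExpr(a, b, nsw) as
          // a single operand rather than flattening everything into a + b + c
          // and losing the flag.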
5090 const SCEV *RHS = getSCEV(BO->RHS); 5091 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5092 if (Flags != SCEV::FlagAnyWrap) { 5093 const SCEV *LHS = getSCEV(BO->LHS); 5094 if (BO->Opcode == Instruction::Sub) 5095 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5096 else 5097 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5098 break; 5099 } 5100 } 5101 5102 if (BO->Opcode == Instruction::Sub) 5103 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5104 else 5105 AddOps.push_back(getSCEV(BO->RHS)); 5106 5107 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5108 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5109 NewBO->Opcode != Instruction::Sub)) { 5110 AddOps.push_back(getSCEV(BO->LHS)); 5111 break; 5112 } 5113 BO = NewBO; 5114 } while (true); 5115 5116 return getAddExpr(AddOps); 5117 } 5118 5119 case Instruction::Mul: { 5120 SmallVector<const SCEV *, 4> MulOps; 5121 do { 5122 if (BO->Op) { 5123 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5124 MulOps.push_back(OpSCEV); 5125 break; 5126 } 5127 5128 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5129 if (Flags != SCEV::FlagAnyWrap) { 5130 MulOps.push_back( 5131 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5132 break; 5133 } 5134 } 5135 5136 MulOps.push_back(getSCEV(BO->RHS)); 5137 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5138 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5139 MulOps.push_back(getSCEV(BO->LHS)); 5140 break; 5141 } 5142 BO = NewBO; 5143 } while (true); 5144 5145 return getMulExpr(MulOps); 5146 } 5147 case Instruction::UDiv: 5148 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5149 case Instruction::Sub: { 5150 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5151 if (BO->Op) 5152 Flags = getNoWrapFlagsFromUB(BO->Op); 5153 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5154 } 5155 case Instruction::And: 5156 // For an expression like x&255 that merely masks off the high bits, 5157 // use zext(trunc(x)) as the SCEV expression. 5158 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5159 if (CI->isNullValue()) 5160 return getSCEV(BO->RHS); 5161 if (CI->isAllOnesValue()) 5162 return getSCEV(BO->LHS); 5163 const APInt &A = CI->getValue(); 5164 5165 // Instcombine's ShrinkDemandedConstant may strip bits out of 5166 // constants, obscuring what would otherwise be a low-bits mask. 5167 // Use computeKnownBits to compute what ShrinkDemandedConstant 5168 // knew about to reconstruct a low-bits mask value. 5169 unsigned LZ = A.countLeadingZeros(); 5170 unsigned TZ = A.countTrailingZeros(); 5171 unsigned BitWidth = A.getBitWidth(); 5172 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 5173 computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(), 5174 0, &AC, nullptr, &DT); 5175 5176 APInt EffectiveMask = 5177 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5178 if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) { 5179 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5180 const SCEV *LHS = getSCEV(BO->LHS); 5181 const SCEV *ShiftedLHS = nullptr; 5182 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5183 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5184 // For an expression like (x * 8) & 8, simplify the multiply. 
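            // Illustrative example (8-bit): for (x * 8) & 8, TZ = MulZeros = 3,
            // so GCD = 3, DivAmt = 1 and NewMul = x * 1; the mask then folds to
            // zext(trunc(x to i1)) * 8, i.e. (x & 1) * 8.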
5185 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5186 unsigned GCD = std::min(MulZeros, TZ); 5187 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5188 SmallVector<const SCEV*, 4> MulOps; 5189 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5190 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5191 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5192 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5193 } 5194 } 5195 if (!ShiftedLHS) 5196 ShiftedLHS = getUDivExpr(LHS, MulCount); 5197 return getMulExpr( 5198 getZeroExtendExpr( 5199 getTruncateExpr(ShiftedLHS, 5200 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5201 BO->LHS->getType()), 5202 MulCount); 5203 } 5204 } 5205 break; 5206 5207 case Instruction::Or: 5208 // If the RHS of the Or is a constant, we may have something like: 5209 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5210 // optimizations will transparently handle this case. 5211 // 5212 // In order for this transformation to be safe, the LHS must be of the 5213 // form X*(2^n) and the Or constant must be less than 2^n. 5214 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5215 const SCEV *LHS = getSCEV(BO->LHS); 5216 const APInt &CIVal = CI->getValue(); 5217 if (GetMinTrailingZeros(LHS) >= 5218 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5219 // Build a plain add SCEV. 5220 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5221 // If the LHS of the add was an addrec and it has no-wrap flags, 5222 // transfer the no-wrap flags, since an or won't introduce a wrap. 5223 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5224 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5225 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5226 OldAR->getNoWrapFlags()); 5227 } 5228 return S; 5229 } 5230 } 5231 break; 5232 5233 case Instruction::Xor: 5234 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5235 // If the RHS of xor is -1, then this is a not operation. 5236 if (CI->isAllOnesValue()) 5237 return getNotSCEV(getSCEV(BO->LHS)); 5238 5239 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5240 // This is a variant of the check for xor with -1, and it handles 5241 // the case where instcombine has trimmed non-demanded bits out 5242 // of an xor with -1. 5243 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5244 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5245 if (LBO->getOpcode() == Instruction::And && 5246 LCI->getValue() == CI->getValue()) 5247 if (const SCEVZeroExtendExpr *Z = 5248 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5249 Type *UTy = BO->LHS->getType(); 5250 const SCEV *Z0 = Z->getOperand(); 5251 Type *Z0Ty = Z0->getType(); 5252 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5253 5254 // If C is a low-bits mask, the zero extend is serving to 5255 // mask off the high bits. Complement the operand and 5256 // re-apply the zext. 5257 if (APIntOps::isMask(Z0TySize, CI->getValue())) 5258 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5259 5260 // If C is a single bit, it may be in the sign-bit position 5261 // before the zero-extend. In this case, represent the xor 5262 // using an add, which is equivalent, and re-apply the zext. 
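          // Illustrative example: with Z0 of type i8 and C = 0x80, flipping
          // the sign bit equals adding 0x80 modulo 2^8, so zext(Z0) ^ 0x80
          // becomes zext(Z0 + 0x80).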
5263 APInt Trunc = CI->getValue().trunc(Z0TySize); 5264 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5265 Trunc.isSignBit()) 5266 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5267 UTy); 5268 } 5269 } 5270 break; 5271 5272 case Instruction::Shl: 5273 // Turn shift left of a constant amount into a multiply. 5274 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5275 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5276 5277 // If the shift count is not less than the bitwidth, the result of 5278 // the shift is undefined. Don't try to analyze it, because the 5279 // resolution chosen here may differ from the resolution chosen in 5280 // other parts of the compiler. 5281 if (SA->getValue().uge(BitWidth)) 5282 break; 5283 5284 // It is currently not resolved how to interpret NSW for left 5285 // shift by BitWidth - 1, so we avoid applying flags in that 5286 // case. Remove this check (or this comment) once the situation 5287 // is resolved. See 5288 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5289 // and http://reviews.llvm.org/D8890 . 5290 auto Flags = SCEV::FlagAnyWrap; 5291 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5292 Flags = getNoWrapFlagsFromUB(BO->Op); 5293 5294 Constant *X = ConstantInt::get(getContext(), 5295 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5296 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5297 } 5298 break; 5299 5300 case Instruction::AShr: 5301 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. 5302 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) 5303 if (Operator *L = dyn_cast<Operator>(BO->LHS)) 5304 if (L->getOpcode() == Instruction::Shl && 5305 L->getOperand(1) == BO->RHS) { 5306 uint64_t BitWidth = getTypeSizeInBits(BO->LHS->getType()); 5307 5308 // If the shift count is not less than the bitwidth, the result of 5309 // the shift is undefined. Don't try to analyze it, because the 5310 // resolution chosen here may differ from the resolution chosen in 5311 // other parts of the compiler. 5312 if (CI->getValue().uge(BitWidth)) 5313 break; 5314 5315 uint64_t Amt = BitWidth - CI->getZExtValue(); 5316 if (Amt == BitWidth) 5317 return getSCEV(L->getOperand(0)); // shift by zero --> noop 5318 return getSignExtendExpr( 5319 getTruncateExpr(getSCEV(L->getOperand(0)), 5320 IntegerType::get(getContext(), Amt)), 5321 BO->LHS->getType()); 5322 } 5323 break; 5324 } 5325 } 5326 5327 switch (U->getOpcode()) { 5328 case Instruction::Trunc: 5329 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 5330 5331 case Instruction::ZExt: 5332 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 5333 5334 case Instruction::SExt: 5335 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 5336 5337 case Instruction::BitCast: 5338 // BitCasts are no-op casts so we just eliminate the cast. 5339 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 5340 return getSCEV(U->getOperand(0)); 5341 break; 5342 5343 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 5344 // lead to pointer expressions which cannot safely be expanded to GEPs, 5345 // because ScalarEvolution doesn't respect the GEP aliasing rules when 5346 // simplifying integer expressions. 
5347
5348   case Instruction::GetElementPtr:
5349     return createNodeForGEP(cast<GEPOperator>(U));
5350
5351   case Instruction::PHI:
5352     return createNodeForPHI(cast<PHINode>(U));
5353
5354   case Instruction::Select:
5355     // U can also be a select constant expr, which we let fall through. Since
5356     // createNodeForSelect only works for a condition that is an `ICmpInst`, and
5357     // constant expressions cannot have instructions as operands, we'd have
5358     // returned getUnknown for a select constant expression anyway.
5359     if (isa<Instruction>(U))
5360       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
5361                                       U->getOperand(1), U->getOperand(2));
5362     break;
5363
5364   case Instruction::Call:
5365   case Instruction::Invoke:
5366     if (Value *RV = CallSite(U).getReturnedArgOperand())
5367       return getSCEV(RV);
5368     break;
5369   }
5370
5371   return getUnknown(V);
5372 }
5373
5374
5375
5376 //===----------------------------------------------------------------------===//
5377 //                   Iteration Count Computation Code
5378 //
5379
5380 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
5381   if (!ExitCount)
5382     return 0;
5383
5384   ConstantInt *ExitConst = ExitCount->getValue();
5385
5386   // Guard against huge trip counts.
5387   if (ExitConst->getValue().getActiveBits() > 32)
5388     return 0;
5389
5390   // In case of integer overflow, this returns 0, which is correct.
5391   return ((unsigned)ExitConst->getZExtValue()) + 1;
5392 }
5393
5394 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
5395   if (BasicBlock *ExitingBB = L->getExitingBlock())
5396     return getSmallConstantTripCount(L, ExitingBB);
5397
5398   // No trip count information for multiple exits.
5399   return 0;
5400 }
5401
5402 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
5403                                                     BasicBlock *ExitingBlock) {
5404   assert(ExitingBlock && "Must pass a non-null exiting block!");
5405   assert(L->isLoopExiting(ExitingBlock) &&
5406          "Exiting block must actually branch out of the loop!");
5407   const SCEVConstant *ExitCount =
5408       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
5409   return getConstantTripCount(ExitCount);
5410 }
5411
5412 unsigned ScalarEvolution::getSmallConstantMaxTripCount(Loop *L) {
5413   const auto *MaxExitCount =
5414       dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
5415   return getConstantTripCount(MaxExitCount);
5416 }
5417
5418 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
5419   if (BasicBlock *ExitingBB = L->getExitingBlock())
5420     return getSmallConstantTripMultiple(L, ExitingBB);
5421
5422   // No trip multiple information for multiple exits.
5423   return 0;
5424 }
5425
5426 /// Returns the largest constant divisor of the trip count of this loop as a
5427 /// normal unsigned value, if possible. This means that the actual trip count is
5428 /// always a multiple of the returned value (don't forget the trip count could
5429 /// very well be zero as well!).
5430 ///
5431 /// Returns 1 if the trip count is unknown or not guaranteed to be a
5432 /// multiple of a constant (which is also the case if the trip count is simply
5433 /// constant; use getSmallConstantTripCount for that case). It will also return
5434 /// 1 if the trip count is very large (>= 2^32).
5435 ///
5436 /// As explained in the comments for getSmallConstantTripCount, this assumes
5437 /// that control exits the loop via ExitingBlock.
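/// Illustrative example: if the backedge-taken count is 4*n - 1, the trip
/// count is 4*n, so this returns 4.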
5438 unsigned 5439 ScalarEvolution::getSmallConstantTripMultiple(Loop *L, 5440 BasicBlock *ExitingBlock) { 5441 assert(ExitingBlock && "Must pass a non-null exiting block!"); 5442 assert(L->isLoopExiting(ExitingBlock) && 5443 "Exiting block must actually branch out of the loop!"); 5444 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 5445 if (ExitCount == getCouldNotCompute()) 5446 return 1; 5447 5448 // Get the trip count from the BE count by adding 1. 5449 const SCEV *TCMul = getAddExpr(ExitCount, getOne(ExitCount->getType())); 5450 // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt 5451 // to factor simple cases. 5452 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul)) 5453 TCMul = Mul->getOperand(0); 5454 5455 const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul); 5456 if (!MulC) 5457 return 1; 5458 5459 ConstantInt *Result = MulC->getValue(); 5460 5461 // Guard against huge trip counts (this requires checking 5462 // for zero to handle the case where the trip count == -1 and the 5463 // addition wraps). 5464 if (!Result || Result->getValue().getActiveBits() > 32 || 5465 Result->getValue().getActiveBits() == 0) 5466 return 1; 5467 5468 return (unsigned)Result->getZExtValue(); 5469 } 5470 5471 /// Get the expression for the number of loop iterations for which this loop is 5472 /// guaranteed not to exit via ExitingBlock. Otherwise return 5473 /// SCEVCouldNotCompute. 5474 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) { 5475 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 5476 } 5477 5478 const SCEV * 5479 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 5480 SCEVUnionPredicate &Preds) { 5481 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 5482 } 5483 5484 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 5485 return getBackedgeTakenInfo(L).getExact(this); 5486 } 5487 5488 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 5489 /// known never to be less than the actual backedge taken count. 5490 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 5491 return getBackedgeTakenInfo(L).getMax(this); 5492 } 5493 5494 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 5495 return getBackedgeTakenInfo(L).isMaxOrZero(this); 5496 } 5497 5498 /// Push PHI nodes in the header of the given loop onto the given Worklist. 5499 static void 5500 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 5501 BasicBlock *Header = L->getHeader(); 5502 5503 // Push all Loop-header PHIs onto the Worklist stack. 5504 for (BasicBlock::iterator I = Header->begin(); 5505 PHINode *PN = dyn_cast<PHINode>(I); ++I) 5506 Worklist.push_back(PN); 5507 } 5508 5509 const ScalarEvolution::BackedgeTakenInfo & 5510 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 5511 auto &BTI = getBackedgeTakenInfo(L); 5512 if (BTI.hasFullInfo()) 5513 return BTI; 5514 5515 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 5516 5517 if (!Pair.second) 5518 return Pair.first->second; 5519 5520 BackedgeTakenInfo Result = 5521 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 5522 5523 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 5524 } 5525 5526 const ScalarEvolution::BackedgeTakenInfo & 5527 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 5528 // Initially insert an invalid entry for this loop. 
If the insertion
5529   // succeeds, proceed to actually compute a backedge-taken count and
5530   // update the value. The temporary CouldNotCompute value tells SCEV
5531   // code elsewhere that it shouldn't attempt to request a new
5532   // backedge-taken count, which could result in infinite recursion.
5533   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
5534       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
5535   if (!Pair.second)
5536     return Pair.first->second;
5537
5538   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
5539   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
5540   // must be cleared in this scope.
5541   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
5542
5543   if (Result.getExact(this) != getCouldNotCompute()) {
5544     assert(isLoopInvariant(Result.getExact(this), L) &&
5545            isLoopInvariant(Result.getMax(this), L) &&
5546            "Computed backedge-taken count isn't loop invariant for loop!");
5547     ++NumTripCountsComputed;
5548   }
5549   else if (Result.getMax(this) == getCouldNotCompute() &&
5550            isa<PHINode>(L->getHeader()->begin())) {
5551     // Only count loops that have phi nodes as not being computable.
5552     ++NumTripCountsNotComputed;
5553   }
5554
5555   // Now that we know more about the trip count for this loop, forget any
5556   // existing SCEV values for PHI nodes in this loop since they are only
5557   // conservative estimates made without the benefit of trip count
5558   // information. This is similar to the code in forgetLoop, except that
5559   // it handles SCEVUnknown PHI nodes specially.
5560   if (Result.hasAnyInfo()) {
5561     SmallVector<Instruction *, 16> Worklist;
5562     PushLoopPHIs(L, Worklist);
5563
5564     SmallPtrSet<Instruction *, 8> Visited;
5565     while (!Worklist.empty()) {
5566       Instruction *I = Worklist.pop_back_val();
5567       if (!Visited.insert(I).second)
5568         continue;
5569
5570       ValueExprMapType::iterator It =
5571           ValueExprMap.find_as(static_cast<Value *>(I));
5572       if (It != ValueExprMap.end()) {
5573         const SCEV *Old = It->second;
5574
5575         // SCEVUnknown for a PHI either means that it has an unrecognized
5576         // structure, or it's a PHI that's in the process of being computed
5577         // by createNodeForPHI. In the former case, additional loop trip
5578         // count information isn't going to change anything. In the latter
5579         // case, createNodeForPHI will perform the necessary updates on its
5580         // own when it gets to that point.
5581         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
5582           eraseValueFromMap(It->first);
5583           forgetMemoizedResults(Old);
5584         }
5585         if (PHINode *PN = dyn_cast<PHINode>(I))
5586           ConstantEvolutionLoopExitValue.erase(PN);
5587       }
5588
5589       PushDefUseChildren(I, Worklist);
5590     }
5591   }
5592
5593   // Re-lookup the insert position, since the call to
5594   // computeBackedgeTakenCount above could result in a
5595   // recursive call to getBackedgeTakenInfo (on a different
5596   // loop), which would invalidate the iterator computed
5597   // earlier.
5598   return BackedgeTakenCounts.find(L)->second = std::move(Result);
5599 }
5600
5601 void ScalarEvolution::forgetLoop(const Loop *L) {
5602   // Drop any stored trip count value.
5603   auto RemoveLoopFromBackedgeMap =
5604       [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
5605         auto BTCPos = Map.find(L);
5606         if (BTCPos != Map.end()) {
5607           BTCPos->second.clear();
5608           Map.erase(BTCPos);
5609         }
5610       };
5611
5612   RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
5613   RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);
5614
5615   // Drop information about expressions based on loop-header PHIs.
5616   SmallVector<Instruction *, 16> Worklist;
5617   PushLoopPHIs(L, Worklist);
5618
5619   SmallPtrSet<Instruction *, 8> Visited;
5620   while (!Worklist.empty()) {
5621     Instruction *I = Worklist.pop_back_val();
5622     if (!Visited.insert(I).second)
5623       continue;
5624
5625     ValueExprMapType::iterator It =
5626         ValueExprMap.find_as(static_cast<Value *>(I));
5627     if (It != ValueExprMap.end()) {
5628       eraseValueFromMap(It->first);
5629       forgetMemoizedResults(It->second);
5630       if (PHINode *PN = dyn_cast<PHINode>(I))
5631         ConstantEvolutionLoopExitValue.erase(PN);
5632     }
5633
5634     PushDefUseChildren(I, Worklist);
5635   }
5636
5637   // Forget all contained loops too, to avoid dangling entries in the
5638   // ValuesAtScopes map.
5639   for (Loop *I : *L)
5640     forgetLoop(I);
5641
5642   LoopPropertiesCache.erase(L);
5643 }
5644
5645 void ScalarEvolution::forgetValue(Value *V) {
5646   Instruction *I = dyn_cast<Instruction>(V);
5647   if (!I) return;
5648
5649   // Drop information about expressions based on loop-header PHIs.
5650   SmallVector<Instruction *, 16> Worklist;
5651   Worklist.push_back(I);
5652
5653   SmallPtrSet<Instruction *, 8> Visited;
5654   while (!Worklist.empty()) {
5655     I = Worklist.pop_back_val();
5656     if (!Visited.insert(I).second)
5657       continue;
5658
5659     ValueExprMapType::iterator It =
5660         ValueExprMap.find_as(static_cast<Value *>(I));
5661     if (It != ValueExprMap.end()) {
5662       eraseValueFromMap(It->first);
5663       forgetMemoizedResults(It->second);
5664       if (PHINode *PN = dyn_cast<PHINode>(I))
5665         ConstantEvolutionLoopExitValue.erase(PN);
5666     }
5667
5668     PushDefUseChildren(I, Worklist);
5669   }
5670 }
5671
5672 /// Get the exact loop backedge taken count considering all loop exits. A
5673 /// computable result can only be returned for loops with a single exit.
5674 /// Returning the minimum taken count among all exits is incorrect because one
5675 /// of the loop's exit limits may have been skipped. howFarToZero assumes that
5676 /// the limit of each loop test is never skipped. This is a valid assumption as
5677 /// long as the loop exits via that test. For precise results, it is the
5678 /// caller's responsibility to specify the relevant loop exit using
5679 /// getExact(ExitingBlock, SE).
5680 const SCEV *
5681 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
5682                                              SCEVUnionPredicate *Preds) const {
5683   // If any exits were not computable, the loop is not computable.
5684 if (!isComplete() || ExitNotTaken.empty()) 5685 return SE->getCouldNotCompute(); 5686 5687 const SCEV *BECount = nullptr; 5688 for (auto &ENT : ExitNotTaken) { 5689 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV"); 5690 5691 if (!BECount) 5692 BECount = ENT.ExactNotTaken; 5693 else if (BECount != ENT.ExactNotTaken) 5694 return SE->getCouldNotCompute(); 5695 if (Preds && !ENT.hasAlwaysTruePredicate()) 5696 Preds->add(ENT.Predicate.get()); 5697 5698 assert((Preds || ENT.hasAlwaysTruePredicate()) && 5699 "Predicate should be always true!"); 5700 } 5701 5702 assert(BECount && "Invalid not taken count for loop exit"); 5703 return BECount; 5704 } 5705 5706 /// Get the exact not taken count for this loop exit. 5707 const SCEV * 5708 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 5709 ScalarEvolution *SE) const { 5710 for (auto &ENT : ExitNotTaken) 5711 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 5712 return ENT.ExactNotTaken; 5713 5714 return SE->getCouldNotCompute(); 5715 } 5716 5717 /// getMax - Get the max backedge taken count for the loop. 5718 const SCEV * 5719 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 5720 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 5721 return !ENT.hasAlwaysTruePredicate(); 5722 }; 5723 5724 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 5725 return SE->getCouldNotCompute(); 5726 5727 return getMax(); 5728 } 5729 5730 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 5731 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 5732 return !ENT.hasAlwaysTruePredicate(); 5733 }; 5734 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 5735 } 5736 5737 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 5738 ScalarEvolution *SE) const { 5739 if (getMax() && getMax() != SE->getCouldNotCompute() && 5740 SE->hasOperand(getMax(), S)) 5741 return true; 5742 5743 for (auto &ENT : ExitNotTaken) 5744 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 5745 SE->hasOperand(ENT.ExactNotTaken, S)) 5746 return true; 5747 5748 return false; 5749 } 5750 5751 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 5752 /// computable exit into a persistent ExitNotTakenInfo array. 5753 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 5754 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 5755 &&ExitCounts, 5756 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 5757 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 5758 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 5759 ExitNotTaken.reserve(ExitCounts.size()); 5760 std::transform( 5761 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 5762 [&](const EdgeExitInfo &EEI) { 5763 BasicBlock *ExitBB = EEI.first; 5764 const ExitLimit &EL = EEI.second; 5765 if (EL.Predicates.empty()) 5766 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 5767 5768 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 5769 for (auto *Pred : EL.Predicates) 5770 Predicate->add(Pred); 5771 5772 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 5773 }); 5774 } 5775 5776 /// Invalidate this result and free the ExitNotTakenInfo array. 
5777 void ScalarEvolution::BackedgeTakenInfo::clear() { 5778 ExitNotTaken.clear(); 5779 } 5780 5781 /// Compute the number of times the backedge of the specified loop will execute. 5782 ScalarEvolution::BackedgeTakenInfo 5783 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 5784 bool AllowPredicates) { 5785 SmallVector<BasicBlock *, 8> ExitingBlocks; 5786 L->getExitingBlocks(ExitingBlocks); 5787 5788 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 5789 5790 SmallVector<EdgeExitInfo, 4> ExitCounts; 5791 bool CouldComputeBECount = true; 5792 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 5793 const SCEV *MustExitMaxBECount = nullptr; 5794 const SCEV *MayExitMaxBECount = nullptr; 5795 bool MustExitMaxOrZero = false; 5796 5797 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 5798 // and compute maxBECount. 5799 // Do a union of all the predicates here. 5800 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 5801 BasicBlock *ExitBB = ExitingBlocks[i]; 5802 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 5803 5804 assert((AllowPredicates || EL.Predicates.empty()) && 5805 "Predicated exit limit when predicates are not allowed!"); 5806 5807 // 1. For each exit that can be computed, add an entry to ExitCounts. 5808 // CouldComputeBECount is true only if all exits can be computed. 5809 if (EL.ExactNotTaken == getCouldNotCompute()) 5810 // We couldn't compute an exact value for this exit, so 5811 // we won't be able to compute an exact value for the loop. 5812 CouldComputeBECount = false; 5813 else 5814 ExitCounts.emplace_back(ExitBB, EL); 5815 5816 // 2. Derive the loop's MaxBECount from each exit's max number of 5817 // non-exiting iterations. Partition the loop exits into two kinds: 5818 // LoopMustExits and LoopMayExits. 5819 // 5820 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 5821 // is a LoopMayExit. If any computable LoopMustExit is found, then 5822 // MaxBECount is the minimum EL.MaxNotTaken of computable 5823 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 5824 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 5825 // computable EL.MaxNotTaken. 5826 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 5827 DT.dominates(ExitBB, Latch)) { 5828 if (!MustExitMaxBECount) { 5829 MustExitMaxBECount = EL.MaxNotTaken; 5830 MustExitMaxOrZero = EL.MaxOrZero; 5831 } else { 5832 MustExitMaxBECount = 5833 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 5834 } 5835 } else if (MayExitMaxBECount != getCouldNotCompute()) { 5836 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 5837 MayExitMaxBECount = EL.MaxNotTaken; 5838 else { 5839 MayExitMaxBECount = 5840 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 5841 } 5842 } 5843 } 5844 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 5845 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 5846 // The loop backedge will be taken the maximum or zero times if there's 5847 // a single exit that must be taken the maximum or zero times. 5848 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 5849 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 5850 MaxBECount, MaxOrZero); 5851 } 5852 5853 ScalarEvolution::ExitLimit 5854 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 5855 bool AllowPredicates) { 5856 5857 // Okay, we've chosen an exiting block. 
See what condition causes us to exit 5858 // at this block and remember the exit block and whether all other targets 5859 // lead to the loop header. 5860 bool MustExecuteLoopHeader = true; 5861 BasicBlock *Exit = nullptr; 5862 for (auto *SBB : successors(ExitingBlock)) 5863 if (!L->contains(SBB)) { 5864 if (Exit) // Multiple exit successors. 5865 return getCouldNotCompute(); 5866 Exit = SBB; 5867 } else if (SBB != L->getHeader()) { 5868 MustExecuteLoopHeader = false; 5869 } 5870 5871 // At this point, we know we have a conditional branch that determines whether 5872 // the loop is exited. However, we don't know if the branch is executed each 5873 // time through the loop. If not, then the execution count of the branch will 5874 // not be equal to the trip count of the loop. 5875 // 5876 // Currently we check for this by checking to see if the Exit branch goes to 5877 // the loop header. If so, we know it will always execute the same number of 5878 // times as the loop. We also handle the case where the exit block *is* the 5879 // loop header. This is common for un-rotated loops. 5880 // 5881 // If both of those tests fail, walk up the unique predecessor chain to the 5882 // header, stopping if there is an edge that doesn't exit the loop. If the 5883 // header is reached, the execution count of the branch will be equal to the 5884 // trip count of the loop. 5885 // 5886 // More extensive analysis could be done to handle more cases here. 5887 // 5888 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 5889 // The simple checks failed, try climbing the unique predecessor chain 5890 // up to the header. 5891 bool Ok = false; 5892 for (BasicBlock *BB = ExitingBlock; BB; ) { 5893 BasicBlock *Pred = BB->getUniquePredecessor(); 5894 if (!Pred) 5895 return getCouldNotCompute(); 5896 TerminatorInst *PredTerm = Pred->getTerminator(); 5897 for (const BasicBlock *PredSucc : PredTerm->successors()) { 5898 if (PredSucc == BB) 5899 continue; 5900 // If the predecessor has a successor that isn't BB and isn't 5901 // outside the loop, assume the worst. 5902 if (L->contains(PredSucc)) 5903 return getCouldNotCompute(); 5904 } 5905 if (Pred == L->getHeader()) { 5906 Ok = true; 5907 break; 5908 } 5909 BB = Pred; 5910 } 5911 if (!Ok) 5912 return getCouldNotCompute(); 5913 } 5914 5915 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 5916 TerminatorInst *Term = ExitingBlock->getTerminator(); 5917 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 5918 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 5919 // Proceed to the next level to examine the exit condition expression. 5920 return computeExitLimitFromCond( 5921 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 5922 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 5923 } 5924 5925 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 5926 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 5927 /*ControlsExit=*/IsOnlyExit); 5928 5929 return getCouldNotCompute(); 5930 } 5931 5932 ScalarEvolution::ExitLimit 5933 ScalarEvolution::computeExitLimitFromCond(const Loop *L, 5934 Value *ExitCond, 5935 BasicBlock *TBB, 5936 BasicBlock *FBB, 5937 bool ControlsExit, 5938 bool AllowPredicates) { 5939 // Check if the controlling expression for this loop is an And or Or. 5940 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 5941 if (BO->getOpcode() == Instruction::And) { 5942 // Recurse on the operands of the and. 
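      // Illustrative example: for "while (i < n && i < m)" the loop exits as
      // soon as either test fails, so the backedge-taken count computed below
      // is the unsigned minimum of the two per-condition exit counts.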
5943 bool EitherMayExit = L->contains(TBB); 5944 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5945 ControlsExit && !EitherMayExit, 5946 AllowPredicates); 5947 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5948 ControlsExit && !EitherMayExit, 5949 AllowPredicates); 5950 const SCEV *BECount = getCouldNotCompute(); 5951 const SCEV *MaxBECount = getCouldNotCompute(); 5952 if (EitherMayExit) { 5953 // Both conditions must be true for the loop to continue executing. 5954 // Choose the less conservative count. 5955 if (EL0.ExactNotTaken == getCouldNotCompute() || 5956 EL1.ExactNotTaken == getCouldNotCompute()) 5957 BECount = getCouldNotCompute(); 5958 else 5959 BECount = 5960 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 5961 if (EL0.MaxNotTaken == getCouldNotCompute()) 5962 MaxBECount = EL1.MaxNotTaken; 5963 else if (EL1.MaxNotTaken == getCouldNotCompute()) 5964 MaxBECount = EL0.MaxNotTaken; 5965 else 5966 MaxBECount = 5967 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 5968 } else { 5969 // Both conditions must be true at the same time for the loop to exit. 5970 // For now, be conservative. 5971 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 5972 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 5973 MaxBECount = EL0.MaxNotTaken; 5974 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 5975 BECount = EL0.ExactNotTaken; 5976 } 5977 5978 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 5979 // to be more aggressive when computing BECount than when computing 5980 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 5981 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 5982 // to not. 5983 if (isa<SCEVCouldNotCompute>(MaxBECount) && 5984 !isa<SCEVCouldNotCompute>(BECount)) 5985 MaxBECount = BECount; 5986 5987 return ExitLimit(BECount, MaxBECount, false, 5988 {&EL0.Predicates, &EL1.Predicates}); 5989 } 5990 if (BO->getOpcode() == Instruction::Or) { 5991 // Recurse on the operands of the or. 5992 bool EitherMayExit = L->contains(FBB); 5993 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5994 ControlsExit && !EitherMayExit, 5995 AllowPredicates); 5996 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5997 ControlsExit && !EitherMayExit, 5998 AllowPredicates); 5999 const SCEV *BECount = getCouldNotCompute(); 6000 const SCEV *MaxBECount = getCouldNotCompute(); 6001 if (EitherMayExit) { 6002 // Both conditions must be false for the loop to continue executing. 6003 // Choose the less conservative count. 6004 if (EL0.ExactNotTaken == getCouldNotCompute() || 6005 EL1.ExactNotTaken == getCouldNotCompute()) 6006 BECount = getCouldNotCompute(); 6007 else 6008 BECount = 6009 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6010 if (EL0.MaxNotTaken == getCouldNotCompute()) 6011 MaxBECount = EL1.MaxNotTaken; 6012 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6013 MaxBECount = EL0.MaxNotTaken; 6014 else 6015 MaxBECount = 6016 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6017 } else { 6018 // Both conditions must be false at the same time for the loop to exit. 6019 // For now, be conservative. 
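        // Illustrative example: if the loop stays in the body while (a || b)
        // holds, it exits only in an iteration where a and b are false
        // together; neither operand's exit count alone bounds that, so we only
        // use a count on which both operands agree.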
        assert(L->contains(TBB) && "Loop block has no successor in loop!");
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition.  These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (L->contains(FBB) == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          BasicBlock *TBB,
                                          BasicBlock *FBB,
                                          bool ControlsExit,
                                          bool AllowPredicates) {

  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
          computeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Cond, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
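        //
        // For illustration: with Cond = ICMP_ULT and a constant RHS of 10,
        // makeExactICmpRegion yields the unsigned range [0, 10), and the
        // question becomes "for how many initial iterations does the chrec
        // stay inside that range?".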
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, !L->contains(TBB));

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, Cond);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI,
    Constant *RHS,
    const Loop *L,
    ICmpInst::Predicate predicate) {

  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
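  //
  // For illustration, the idiom this routine recognizes looks like (names are
  // hypothetical):
  //
  //   static const int Table[] = {3, 1, 4, 1, 5, 9, 2, 6, 0};
  //   for (i = 0; Table[i] != 0; ++i) { ... }
  //
  // Each iteration loads through a GEP of the constant global with a single
  // loop-variant index, so the exit test can be evaluated iteration by
  // iteration until it fails.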
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization.  Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
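  //
  // For illustration: given "%v = lshr i32 %x, 3" this matches with
  // OutLHS = %x and OutOpCode = Instruction::LShr; a shift by zero does not
  // match because the shift amount must be strictly positive.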
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either %iv itself or %iv.shifted, in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
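    //
    // For illustration, with (hypothetical) i8 K = -16 and a shift amount of
    // 1, the recurrence produces -16, -8, -4, -2, -1, -1, ... and stabilizes
    // to -1 well within bitwidth(K) = 8 iterations.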
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    bool KnownZero, KnownOne;
    ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0, nullptr,
                   Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (KnownZero)
      StableValue = ConstantInt::get(Ty, 0);
    else if (KnownOne)
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI.  If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}

// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, that the loop executes a constant
/// number of times, and that the PHI node is just a recurrence involving
/// constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI) break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.  We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue; // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
  case scCouldNotCompute:
  case scAddRecExpr:
    break;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    break;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    break;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    break;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
      if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
        unsigned AS = PTy->getAddressSpace();
        Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
        C = ConstantExpr::getBitCast(C, DestPtrTy);
      }
      for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
        if (!C2) return nullptr;

        // First pointer!
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes.  We can add bytes to an
          // i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers.  We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    break;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy()) return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy()) return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    break;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    break;
  }
  case scSMaxExpr:
  case scUMaxExpr:
    break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = this->LI[I->getParent()];
      if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                    dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding.  Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B.  The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2.  The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for
  // B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
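  //
  // For illustration with small (hypothetical) numbers: to solve
  // 4 * X = 8 (mod 16), i.e. BW = 4, we have Mult2 = 2 and D = 4; B = 8 is
  // divisible by D; A / D = 1, whose inverse modulo N / D = 4 is I = 1; so
  // the minimum root is I * (B / D) = 2.  Indeed 4 * 2 = 8 (mod 16).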
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general.  The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);                 // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //
  //   I * (B / D) mod (N / D)
  //
  // To simplify the computation, we factor out the divide by D:
  //
  //   (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}.  This returns either the two roots (which might be the same)
/// or None if the roots cannot be computed.
static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
    using namespace APIntOps;
    const APInt &C = L;
    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
    // The B coefficient is M-N/2.
    APInt B(M);
    B -= sdiv(N, Two);

    // The A coefficient is N/2.
    APInt A(N.sdiv(Two));

    // Compute the B^2-4ac term.
    APInt SqrtTerm(B);
    SqrtTerm *= B;
    SqrtTerm -= Four * (A * C);

    if (SqrtTerm.isNegative()) {
      // The loop is provably infinite.
      return None;
    }

    // Compute sqrt(B^2-4ac).  This is guaranteed to be the nearest
    // integer value or else APInt::sqrt() will assert.
    APInt SqrtVal(SqrtTerm.sqrt());

    // Compute the two solutions for the quadratic formula.
    // The divisions must be performed as signed divisions.
    APInt NegB(-B);
    APInt TwoA(A << 1);
    if (TwoA.isMinValue())
      return None;

    LLVMContext &Context = SE.getContext();

    ConstantInt *Solution1 =
        ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
    ConstantInt *Solution2 =
        ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

    return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                          cast<SCEVConstant>(SE.getConstant(Solution2)));
  } // end block using APIntOps
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test.  The exit condition
  // is now expressed as a single expression, V = x-y.  So the exit test is
  // effectively V != 0.  We know, and take advantage of, the fact that this
  // expression is only used in a comparison-by-zero context.
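  //
  // For illustration: for a canonical loop "for (i = 0; i != n; ++i)" the
  // subtraction yields V = {-n,+,1}, which first becomes zero at iteration
  // n, so the backedge-taken count is n.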

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  // If the value is a constant, we can answer the question directly.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2); // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          // We found a quadratic root!
          return ExitLimit(R1, R1, false, Predicates);
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //     Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>.  If the AddRec is NUW,
  // then (in an unsigned sense) it cannot be counting up to wrap to 0; it
  // must be counting down to equal 0.  Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->equalsInt(0))
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
    APInt MaxBECount = getUnsignedRange(Distance).getUnsignedMax();

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap, we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    return ExitLimit(Exact, Exact, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(
      StepC->getAPInt(), getNegativeSCEV(Start), *this);
  return ExitLimit(E, E, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isNullValue())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left.  Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
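  //
  // For illustration: "X u>= 1" becomes "X u> 0" and "X s<= 7" becomes
  // "X s< 8", while comparisons that are true or false for every value of X
  // (such as "X u>= 0") are folded away entirely by the exact-range check
  // below.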
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivially_true or trivially_false.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
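  //
  // For illustration: "LHS s<= RHS" becomes "LHS s< RHS + 1" when the range
  // of RHS shows that RHS + 1 cannot overflow (and, failing that,
  // "LHS - 1 s< RHS" when the range of LHS allows the subtraction); the
  // unsigned cases are analogous.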
7490 switch (Pred) { 7491 case ICmpInst::ICMP_SLE: 7492 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 7493 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7494 SCEV::FlagNSW); 7495 Pred = ICmpInst::ICMP_SLT; 7496 Changed = true; 7497 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 7498 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 7499 SCEV::FlagNSW); 7500 Pred = ICmpInst::ICMP_SLT; 7501 Changed = true; 7502 } 7503 break; 7504 case ICmpInst::ICMP_SGE: 7505 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 7506 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 7507 SCEV::FlagNSW); 7508 Pred = ICmpInst::ICMP_SGT; 7509 Changed = true; 7510 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 7511 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7512 SCEV::FlagNSW); 7513 Pred = ICmpInst::ICMP_SGT; 7514 Changed = true; 7515 } 7516 break; 7517 case ICmpInst::ICMP_ULE: 7518 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 7519 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7520 SCEV::FlagNUW); 7521 Pred = ICmpInst::ICMP_ULT; 7522 Changed = true; 7523 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 7524 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 7525 Pred = ICmpInst::ICMP_ULT; 7526 Changed = true; 7527 } 7528 break; 7529 case ICmpInst::ICMP_UGE: 7530 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 7531 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 7532 Pred = ICmpInst::ICMP_UGT; 7533 Changed = true; 7534 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 7535 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7536 SCEV::FlagNUW); 7537 Pred = ICmpInst::ICMP_UGT; 7538 Changed = true; 7539 } 7540 break; 7541 default: 7542 break; 7543 } 7544 7545 // TODO: More simplifications are possible here. 7546 7547 // Recursively simplify until we either hit a recursion limit or nothing 7548 // changes. 7549 if (Changed) 7550 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 7551 7552 return Changed; 7553 7554 trivially_true: 7555 // Return 0 == 0. 7556 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7557 Pred = ICmpInst::ICMP_EQ; 7558 return true; 7559 7560 trivially_false: 7561 // Return 0 != 0. 7562 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7563 Pred = ICmpInst::ICMP_NE; 7564 return true; 7565 } 7566 7567 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 7568 return getSignedRange(S).getSignedMax().isNegative(); 7569 } 7570 7571 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 7572 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 7573 } 7574 7575 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 7576 return !getSignedRange(S).getSignedMin().isNegative(); 7577 } 7578 7579 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 7580 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 7581 } 7582 7583 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 7584 return isKnownNegative(S) || isKnownPositive(S); 7585 } 7586 7587 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 7588 const SCEV *LHS, const SCEV *RHS) { 7589 // Canonicalize the inputs first. 7590 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7591 7592 // If LHS or RHS is an addrec, check to see if the condition is true in 7593 // every iteration of the loop. 
7594   // If LHS and RHS are both addrec, both conditions must be true in
7595   // every iteration of the loop.
7596   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
7597   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
7598   bool LeftGuarded = false;
7599   bool RightGuarded = false;
7600   if (LAR) {
7601     const Loop *L = LAR->getLoop();
7602     if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
7603         isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
7604       if (!RAR) return true;
7605       LeftGuarded = true;
7606     }
7607   }
7608   if (RAR) {
7609     const Loop *L = RAR->getLoop();
7610     if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
7611         isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
7612       if (!LAR) return true;
7613       RightGuarded = true;
7614     }
7615   }
7616   if (LeftGuarded && RightGuarded)
7617     return true;
7618
7619   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
7620     return true;
7621
7622   // Otherwise see what can be done with known constant ranges.
7623   return isKnownPredicateViaConstantRanges(Pred, LHS, RHS);
7624 }
7625
7626 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
7627                                            ICmpInst::Predicate Pred,
7628                                            bool &Increasing) {
7629   bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);
7630
7631 #ifndef NDEBUG
7632   // Verify an invariant: swapping the predicate should turn a monotonically
7633   // increasing change into a monotonically decreasing one, and vice versa.
7634   bool IncreasingSwapped;
7635   bool ResultSwapped = isMonotonicPredicateImpl(
7636       LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);
7637
7638   assert(Result == ResultSwapped && "should be able to analyze both!");
7639   if (ResultSwapped)
7640     assert(Increasing == !IncreasingSwapped &&
7641            "monotonicity should flip as we swap the predicate");
7642 #endif
7643
7644   return Result;
7645 }
7646
7647 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
7648                                                ICmpInst::Predicate Pred,
7649                                                bool &Increasing) {
7650
7651   // A zero step value for LHS means the induction variable is essentially a
7652   // loop invariant value. We don't really depend on the predicate actually
7653   // flipping from false to true (for increasing predicates, and the other way
7654   // around for decreasing predicates); all we care about is that *if* the
7655   // predicate changes then it only changes from false to true.
7656   //
7657   // A zero step value in itself is not very useful, but there may be places
7658   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
7659   // as general as possible.
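  // For example, if LHS is {0,+,1}<nuw>, the predicate LHS u> N can only
  // change from false to true as the IV grows, so it is monotonically
  // increasing, while LHS u< N can only change from true to false, making
  // it monotonically decreasing.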
7660 7661 switch (Pred) { 7662 default: 7663 return false; // Conservative answer 7664 7665 case ICmpInst::ICMP_UGT: 7666 case ICmpInst::ICMP_UGE: 7667 case ICmpInst::ICMP_ULT: 7668 case ICmpInst::ICMP_ULE: 7669 if (!LHS->hasNoUnsignedWrap()) 7670 return false; 7671 7672 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 7673 return true; 7674 7675 case ICmpInst::ICMP_SGT: 7676 case ICmpInst::ICMP_SGE: 7677 case ICmpInst::ICMP_SLT: 7678 case ICmpInst::ICMP_SLE: { 7679 if (!LHS->hasNoSignedWrap()) 7680 return false; 7681 7682 const SCEV *Step = LHS->getStepRecurrence(*this); 7683 7684 if (isKnownNonNegative(Step)) { 7685 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 7686 return true; 7687 } 7688 7689 if (isKnownNonPositive(Step)) { 7690 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 7691 return true; 7692 } 7693 7694 return false; 7695 } 7696 7697 } 7698 7699 llvm_unreachable("switch has default clause!"); 7700 } 7701 7702 bool ScalarEvolution::isLoopInvariantPredicate( 7703 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 7704 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 7705 const SCEV *&InvariantRHS) { 7706 7707 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 7708 if (!isLoopInvariant(RHS, L)) { 7709 if (!isLoopInvariant(LHS, L)) 7710 return false; 7711 7712 std::swap(LHS, RHS); 7713 Pred = ICmpInst::getSwappedPredicate(Pred); 7714 } 7715 7716 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 7717 if (!ArLHS || ArLHS->getLoop() != L) 7718 return false; 7719 7720 bool Increasing; 7721 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 7722 return false; 7723 7724 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 7725 // true as the loop iterates, and the backedge is control dependent on 7726 // "ArLHS `Pred` RHS" == true then we can reason as follows: 7727 // 7728 // * if the predicate was false in the first iteration then the predicate 7729 // is never evaluated again, since the loop exits without taking the 7730 // backedge. 7731 // * if the predicate was true in the first iteration then it will 7732 // continue to be true for all future iterations since it is 7733 // monotonically increasing. 7734 // 7735 // For both the above possibilities, we can replace the loop varying 7736 // predicate with its value on the first iteration of the loop (which is 7737 // loop invariant). 7738 // 7739 // A similar reasoning applies for a monotonically decreasing predicate, by 7740 // replacing true with false and false with true in the above two bullets. 7741 7742 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 7743 7744 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 7745 return false; 7746 7747 InvariantPred = Pred; 7748 InvariantLHS = ArLHS->getStart(); 7749 InvariantRHS = RHS; 7750 return true; 7751 } 7752 7753 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 7754 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 7755 if (HasSameValue(LHS, RHS)) 7756 return ICmpInst::isTrueWhenEqual(Pred); 7757 7758 // This code is split out from isKnownPredicate because it is called from 7759 // within isLoopEntryGuardedByCond. 
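  // For example, if the unsigned range of LHS is [0, 8) and the unsigned
  // range of RHS is [8, 16), then makeSatisfyingICmpRegion(ICMP_ULT, [8, 16))
  // is [0, 8), which contains the range of LHS, proving LHS u< RHS.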
7760
7761   auto CheckRanges =
7762       [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
7763     return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
7764         .contains(RangeLHS);
7765   };
7766
7767   // The check at the top of the function catches the case where the values are
7768   // known to be equal.
7769   if (Pred == CmpInst::ICMP_EQ)
7770     return false;
7771
7772   if (Pred == CmpInst::ICMP_NE)
7773     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
7774            CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
7775            isKnownNonZero(getMinusSCEV(LHS, RHS));
7776
7777   if (CmpInst::isSigned(Pred))
7778     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
7779
7780   return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
7781 }
7782
7783 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
7784                                                     const SCEV *LHS,
7785                                                     const SCEV *RHS) {
7786
7787   // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
7788   // Return Y via OutY.
7789   auto MatchBinaryAddToConst =
7790       [this](const SCEV *Result, const SCEV *X, APInt &OutY,
7791              SCEV::NoWrapFlags ExpectedFlags) {
7792     const SCEV *NonConstOp, *ConstOp;
7793     SCEV::NoWrapFlags FlagsPresent;
7794
7795     if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
7796         !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
7797       return false;
7798
7799     OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
7800     return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
7801   };
7802
7803   APInt C;
7804
7805   switch (Pred) {
7806   default:
7807     break;
7808
7809   case ICmpInst::ICMP_SGE:
7810     std::swap(LHS, RHS); LLVM_FALLTHROUGH;
7811   case ICmpInst::ICMP_SLE:
7812     // X s<= (X + C)<nsw> if C >= 0
7813     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
7814       return true;
7815
7816     // (X + C)<nsw> s<= X if C <= 0
7817     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
7818         !C.isStrictlyPositive())
7819       return true;
7820     break;
7821
7822   case ICmpInst::ICMP_SGT:
7823     std::swap(LHS, RHS); LLVM_FALLTHROUGH;
7824   case ICmpInst::ICMP_SLT:
7825     // X s< (X + C)<nsw> if C > 0
7826     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
7827         C.isStrictlyPositive())
7828       return true;
7829
7830     // (X + C)<nsw> s< X if C < 0
7831     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
7832       return true;
7833     break;
7834   }
7835
7836   return false;
7837 }
7838
7839 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
7840                                                    const SCEV *LHS,
7841                                                    const SCEV *RHS) {
7842   if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
7843     return false;
7844
7845   // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
7846   // on the stack can result in exponential time complexity.
7847   SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
7848
7849   // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
7850   //
7851   // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
7852   // isKnownPredicate. isKnownPredicate is more powerful, but also more
7853   // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
7854   // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
7855   // use isKnownPredicate later if needed.
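  // For example, with i8 operands: if L s>= 0 then L u<= 127, so I u< L
  // forces I u< 127, which clears I's sign bit and gives I s>= 0; conversely,
  // if I s>= 0 and I s< L, both values lie in [0, 127], where the signed and
  // unsigned orderings agree, so I u< L.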
7856   return isKnownNonNegative(RHS) &&
7857          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
7858          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
7859 }
7860
7861 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
7862                                         ICmpInst::Predicate Pred,
7863                                         const SCEV *LHS, const SCEV *RHS) {
7864   // No need to even try if we know the module has no guards.
7865   if (!HasGuards)
7866     return false;
7867
7868   return any_of(*BB, [&](Instruction &I) {
7869     using namespace llvm::PatternMatch;
7870
7871     Value *Condition;
7872     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
7873                          m_Value(Condition))) &&
7874            isImpliedCond(Pred, LHS, RHS, Condition, false);
7875   });
7876 }
7877
7878 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
7879 /// protected by a conditional between LHS and RHS. This is used to
7880 /// eliminate casts.
7881 bool
7882 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
7883                                              ICmpInst::Predicate Pred,
7884                                              const SCEV *LHS, const SCEV *RHS) {
7885   // Interpret a null as meaning no loop, where there is obviously no guard
7886   // (interprocedural conditions notwithstanding).
7887   if (!L) return true;
7888
7889   if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
7890     return true;
7891
7892   BasicBlock *Latch = L->getLoopLatch();
7893   if (!Latch)
7894     return false;
7895
7896   BranchInst *LoopContinuePredicate =
7897       dyn_cast<BranchInst>(Latch->getTerminator());
7898   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
7899       isImpliedCond(Pred, LHS, RHS,
7900                     LoopContinuePredicate->getCondition(),
7901                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
7902     return true;
7903
7904   // We don't want more than one activation of the following loops on the stack
7905   // -- that can lead to O(n!) time complexity.
7906   if (WalkingBEDominatingConds)
7907     return false;
7908
7909   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
7910
7911   // See if we can exploit a trip count to prove the predicate.
7912   const auto &BETakenInfo = getBackedgeTakenInfo(L);
7913   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
7914   if (LatchBECount != getCouldNotCompute()) {
7915     // We know that Latch branches back to the loop header exactly
7916     // LatchBECount times. This means the backedge condition at Latch is
7917     // equivalent to "{0,+,1} u< LatchBECount".
7918     Type *Ty = LatchBECount->getType();
7919     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
7920     const SCEV *LoopCounter =
7921         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
7922     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
7923                       LatchBECount))
7924       return true;
7925   }
7926
7927   // Check conditions due to any @llvm.assume intrinsics.
7928   for (auto &AssumeVH : AC.assumptions()) {
7929     if (!AssumeVH)
7930       continue;
7931     auto *CI = cast<CallInst>(AssumeVH);
7932     if (!DT.dominates(CI, Latch->getTerminator()))
7933       continue;
7934
7935     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
7936       return true;
7937   }
7938
7939   // If the loop is not reachable from the entry block, we risk running into an
7940   // infinite loop as we walk up into the dom tree. These loops do not matter
7941   // anyway, so we just return a conservative answer when we see them.
7942 if (!DT.isReachableFromEntry(L->getHeader())) 7943 return false; 7944 7945 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 7946 return true; 7947 7948 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 7949 DTN != HeaderDTN; DTN = DTN->getIDom()) { 7950 7951 assert(DTN && "should reach the loop header before reaching the root!"); 7952 7953 BasicBlock *BB = DTN->getBlock(); 7954 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 7955 return true; 7956 7957 BasicBlock *PBB = BB->getSinglePredecessor(); 7958 if (!PBB) 7959 continue; 7960 7961 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 7962 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 7963 continue; 7964 7965 Value *Condition = ContinuePredicate->getCondition(); 7966 7967 // If we have an edge `E` within the loop body that dominates the only 7968 // latch, the condition guarding `E` also guards the backedge. This 7969 // reasoning works only for loops with a single latch. 7970 7971 BasicBlockEdge DominatingEdge(PBB, BB); 7972 if (DominatingEdge.isSingleEdge()) { 7973 // We're constructively (and conservatively) enumerating edges within the 7974 // loop body that dominate the latch. The dominator tree better agree 7975 // with us on this: 7976 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 7977 7978 if (isImpliedCond(Pred, LHS, RHS, Condition, 7979 BB != ContinuePredicate->getSuccessor(0))) 7980 return true; 7981 } 7982 } 7983 7984 return false; 7985 } 7986 7987 bool 7988 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 7989 ICmpInst::Predicate Pred, 7990 const SCEV *LHS, const SCEV *RHS) { 7991 // Interpret a null as meaning no loop, where there is obviously no guard 7992 // (interprocedural conditions notwithstanding). 7993 if (!L) return false; 7994 7995 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 7996 return true; 7997 7998 // Starting at the loop predecessor, climb up the predecessor chain, as long 7999 // as there are predecessors that can be found that have unique successors 8000 // leading to the original header. 8001 for (std::pair<BasicBlock *, BasicBlock *> 8002 Pair(L->getLoopPredecessor(), L->getHeader()); 8003 Pair.first; 8004 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 8005 8006 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 8007 return true; 8008 8009 BranchInst *LoopEntryPredicate = 8010 dyn_cast<BranchInst>(Pair.first->getTerminator()); 8011 if (!LoopEntryPredicate || 8012 LoopEntryPredicate->isUnconditional()) 8013 continue; 8014 8015 if (isImpliedCond(Pred, LHS, RHS, 8016 LoopEntryPredicate->getCondition(), 8017 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8018 return true; 8019 } 8020 8021 // Check conditions due to any @llvm.assume intrinsics. 8022 for (auto &AssumeVH : AC.assumptions()) { 8023 if (!AssumeVH) 8024 continue; 8025 auto *CI = cast<CallInst>(AssumeVH); 8026 if (!DT.dominates(CI, L->getHeader())) 8027 continue; 8028 8029 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8030 return true; 8031 } 8032 8033 return false; 8034 } 8035 8036 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8037 const SCEV *LHS, const SCEV *RHS, 8038 Value *FoundCondValue, 8039 bool Inverse) { 8040 if (!PendingLoopPredicates.insert(FoundCondValue).second) 8041 return false; 8042 8043 auto ClearOnExit = 8044 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 8045 8046 // Recursively handle And and Or conditions. 
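  // For example, if the dominating branch was taken on `%a && %b` and we are
  // not inverting, both %a and %b are known true, so it suffices for either
  // conjunct alone to imply the desired predicate. Dually, if the branch on
  // `%a || %b` was not taken (Inverse is set), both disjuncts are known
  // false, and either negated disjunct may be used.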
8047   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
8048     if (BO->getOpcode() == Instruction::And) {
8049       if (!Inverse)
8050         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
8051                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
8052     } else if (BO->getOpcode() == Instruction::Or) {
8053       if (Inverse)
8054         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
8055                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
8056     }
8057   }
8058
8059   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
8060   if (!ICI) return false;
8061
8062   // We have found a conditional branch that dominates the loop or controls
8063   // the loop latch. Check to see if it is the comparison we are looking for.
8064   ICmpInst::Predicate FoundPred;
8065   if (Inverse)
8066     FoundPred = ICI->getInversePredicate();
8067   else
8068     FoundPred = ICI->getPredicate();
8069
8070   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
8071   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
8072
8073   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
8074 }
8075
8076 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
8077                                     const SCEV *RHS,
8078                                     ICmpInst::Predicate FoundPred,
8079                                     const SCEV *FoundLHS,
8080                                     const SCEV *FoundRHS) {
8081   // Balance the types.
8082   if (getTypeSizeInBits(LHS->getType()) <
8083       getTypeSizeInBits(FoundLHS->getType())) {
8084     if (CmpInst::isSigned(Pred)) {
8085       LHS = getSignExtendExpr(LHS, FoundLHS->getType());
8086       RHS = getSignExtendExpr(RHS, FoundLHS->getType());
8087     } else {
8088       LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
8089       RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
8090     }
8091   } else if (getTypeSizeInBits(LHS->getType()) >
8092              getTypeSizeInBits(FoundLHS->getType())) {
8093     if (CmpInst::isSigned(FoundPred)) {
8094       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
8095       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
8096     } else {
8097       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
8098       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
8099     }
8100   }
8101
8102   // Canonicalize the query to match the way instcombine will have
8103   // canonicalized the comparison.
8104   if (SimplifyICmpOperands(Pred, LHS, RHS))
8105     if (LHS == RHS)
8106       return CmpInst::isTrueWhenEqual(Pred);
8107   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
8108     if (FoundLHS == FoundRHS)
8109       return CmpInst::isFalseWhenEqual(FoundPred);
8110
8111   // Check to see if we can make the LHS or RHS match.
8112   if (LHS == FoundRHS || RHS == FoundLHS) {
8113     if (isa<SCEVConstant>(RHS)) {
8114       std::swap(FoundLHS, FoundRHS);
8115       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
8116     } else {
8117       std::swap(LHS, RHS);
8118       Pred = ICmpInst::getSwappedPredicate(Pred);
8119     }
8120   }
8121
8122   // Check whether the found predicate is the same as the desired predicate.
8123   if (FoundPred == Pred)
8124     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
8125
8126   // Check whether swapping the found predicate makes it the same as the
8127   // desired predicate.
8128   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
8129     if (isa<SCEVConstant>(RHS))
8130       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
8131     else
8132       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
8133                                    RHS, LHS, FoundLHS, FoundRHS);
8134   }
8135
8136   // Unsigned comparison is the same as signed comparison when both operands
8137   // are non-negative.
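  // For example, if 0 s<= %x and 0 s<= %y are known, neither value has its
  // sign bit set, so %x u< %y and %x s< %y coincide; a found unsigned fact
  // can then establish the corresponding signed predicate.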
8138   if (CmpInst::isUnsigned(FoundPred) &&
8139       CmpInst::getSignedPredicate(FoundPred) == Pred &&
8140       isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
8141     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
8142
8143   // Check if we can make progress by sharpening ranges.
8144   if (FoundPred == ICmpInst::ICMP_NE &&
8145       (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
8146
8147     const SCEVConstant *C = nullptr;
8148     const SCEV *V = nullptr;
8149
8150     if (isa<SCEVConstant>(FoundLHS)) {
8151       C = cast<SCEVConstant>(FoundLHS);
8152       V = FoundRHS;
8153     } else {
8154       C = cast<SCEVConstant>(FoundRHS);
8155       V = FoundLHS;
8156     }
8157
8158     // The guarding predicate tells us that C != V. If the known range
8159     // of V is [C, t), we can sharpen the range to [C + 1, t). The
8160     // range we consider has to correspond to the same signedness as the
8161     // predicate we're interested in folding.
8162
8163     APInt Min = ICmpInst::isSigned(Pred) ?
8164         getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin();
8165
8166     if (Min == C->getAPInt()) {
8167       // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
8168       // This is true even if (Min + 1) wraps around -- in case of
8169       // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
8170
8171       APInt SharperMin = Min + 1;
8172
8173       switch (Pred) {
8174       case ICmpInst::ICMP_SGE:
8175       case ICmpInst::ICMP_UGE:
8176         // We know V `Pred` SharperMin. If this implies LHS `Pred`
8177         // RHS, we're done.
8178         if (isImpliedCondOperands(Pred, LHS, RHS, V,
8179                                   getConstant(SharperMin)))
8180           return true;
8181         LLVM_FALLTHROUGH;
8182       case ICmpInst::ICMP_SGT:
8183       case ICmpInst::ICMP_UGT:
8184         // We know from the range information that (V `Pred` Min ||
8185         // V == Min). We know from the guarding condition that !(V
8186         // == Min). This gives us
8187         //
8188         //       V `Pred` Min || V == Min && !(V == Min)
8189         //    => V `Pred` Min
8190         //
8191         // If V `Pred` Min implies LHS `Pred` RHS, we're done.
8192
8193         if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
8194           return true;
8195         break;
8196       default:
8197         // No change
8198         break;
8199       }
8200     }
8201   }
8202
8203   // Check whether the actual condition is more than sufficient.
8204   if (FoundPred == ICmpInst::ICMP_EQ)
8205     if (ICmpInst::isTrueWhenEqual(Pred))
8206       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
8207         return true;
8208   if (Pred == ICmpInst::ICMP_NE)
8209     if (!ICmpInst::isTrueWhenEqual(FoundPred))
8210       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
8211         return true;
8212
8213   // Otherwise assume the worst.
8214   return false;
8215 }
8216
8217 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
8218                                      const SCEV *&L, const SCEV *&R,
8219                                      SCEV::NoWrapFlags &Flags) {
8220   const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
8221   if (!AE || AE->getNumOperands() != 2)
8222     return false;
8223
8224   L = AE->getOperand(0);
8225   R = AE->getOperand(1);
8226   Flags = AE->getNoWrapFlags();
8227   return true;
8228 }
8229
8230 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
8231                                                            const SCEV *Less) {
8232   // We avoid subtracting expressions here because this function is usually
8233   // fairly deep in the call stack (i.e. is called many times).
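  // For example, computeConstantDifference(%x + 13, %x) is 13, and for
  // More = {6,+,%s} and Less = {4,+,%s} on the same loop the addrecs are
  // stripped to their starts, giving 6 - 4 = 2.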
8234 8235 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 8236 const auto *LAR = cast<SCEVAddRecExpr>(Less); 8237 const auto *MAR = cast<SCEVAddRecExpr>(More); 8238 8239 if (LAR->getLoop() != MAR->getLoop()) 8240 return None; 8241 8242 // We look at affine expressions only; not for correctness but to keep 8243 // getStepRecurrence cheap. 8244 if (!LAR->isAffine() || !MAR->isAffine()) 8245 return None; 8246 8247 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 8248 return None; 8249 8250 Less = LAR->getStart(); 8251 More = MAR->getStart(); 8252 8253 // fall through 8254 } 8255 8256 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 8257 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 8258 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 8259 return M - L; 8260 } 8261 8262 const SCEV *L, *R; 8263 SCEV::NoWrapFlags Flags; 8264 if (splitBinaryAdd(Less, L, R, Flags)) 8265 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8266 if (R == More) 8267 return -(LC->getAPInt()); 8268 8269 if (splitBinaryAdd(More, L, R, Flags)) 8270 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8271 if (R == Less) 8272 return LC->getAPInt(); 8273 8274 return None; 8275 } 8276 8277 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 8278 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 8279 const SCEV *FoundLHS, const SCEV *FoundRHS) { 8280 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 8281 return false; 8282 8283 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8284 if (!AddRecLHS) 8285 return false; 8286 8287 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 8288 if (!AddRecFoundLHS) 8289 return false; 8290 8291 // We'd like to let SCEV reason about control dependencies, so we constrain 8292 // both the inequalities to be about add recurrences on the same loop. This 8293 // way we can use isLoopEntryGuardedByCond later. 8294 8295 const Loop *L = AddRecFoundLHS->getLoop(); 8296 if (L != AddRecLHS->getLoop()) 8297 return false; 8298 8299 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 8300 // 8301 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 8302 // ... (2) 8303 // 8304 // Informal proof for (2), assuming (1) [*]: 8305 // 8306 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 8307 // 8308 // Then 8309 // 8310 // FoundLHS s< FoundRHS s< INT_MIN - C 8311 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 8312 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 8313 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 8314 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 8315 // <=> FoundLHS + C s< FoundRHS + C 8316 // 8317 // [*]: (1) can be proved by ruling out overflow. 8318 // 8319 // [**]: This can be proved by analyzing all the four possibilities: 8320 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 8321 // (A s>= 0, B s>= 0). 8322 // 8323 // Note: 8324 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 8325 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 8326 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 8327 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 8328 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 8329 // C)". 
8330
8331   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
8332   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
8333   if (!LDiff || !RDiff || *LDiff != *RDiff)
8334     return false;
8335
8336   if (LDiff->isMinValue())
8337     return true;
8338
8339   APInt FoundRHSLimit;
8340
8341   if (Pred == CmpInst::ICMP_ULT) {
8342     FoundRHSLimit = -(*RDiff);
8343   } else {
8344     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
8345     FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
8346   }
8347
8348   // Try to prove (1) or (2), as needed.
8349   return isLoopEntryGuardedByCond(L, Pred, FoundRHS,
8350                                   getConstant(FoundRHSLimit));
8351 }
8352
8353 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
8354                                             const SCEV *LHS, const SCEV *RHS,
8355                                             const SCEV *FoundLHS,
8356                                             const SCEV *FoundRHS) {
8357   if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
8358     return true;
8359
8360   if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
8361     return true;
8362
8363   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
8364                                      FoundLHS, FoundRHS) ||
8365          // ~x < ~y --> x > y
8366          isImpliedCondOperandsHelper(Pred, LHS, RHS,
8367                                      getNotSCEV(FoundRHS),
8368                                      getNotSCEV(FoundLHS));
8369 }
8370
8371
8372 /// If Expr computes ~A, return A, otherwise return nullptr.
8373 static const SCEV *MatchNotExpr(const SCEV *Expr) {
8374   const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
8375   if (!Add || Add->getNumOperands() != 2 ||
8376       !Add->getOperand(0)->isAllOnesValue())
8377     return nullptr;
8378
8379   const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
8380   if (!AddRHS || AddRHS->getNumOperands() != 2 ||
8381       !AddRHS->getOperand(0)->isAllOnesValue())
8382     return nullptr;
8383
8384   return AddRHS->getOperand(1);
8385 }
8386
8387
8388 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
8389 template<typename MaxExprType>
8390 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
8391                               const SCEV *Candidate) {
8392   const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
8393   if (!MaxExpr) return false;
8394
8395   return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
8396 }
8397
8398
8399 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
8400 template<typename MaxExprType>
8401 static bool IsMinConsistingOf(ScalarEvolution &SE,
8402                               const SCEV *MaybeMinExpr,
8403                               const SCEV *Candidate) {
8404   const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
8405   if (!MaybeMaxExpr)
8406     return false;
8407
8408   return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
8409 }
8410
8411 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
8412                                            ICmpInst::Predicate Pred,
8413                                            const SCEV *LHS, const SCEV *RHS) {
8414
8415   // If both sides are affine addrecs for the same loop, with equal
8416   // steps, and we know the recurrences don't wrap, then we only
8417   // need to check the predicate on the starting values.
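  // For example, {4,+,2}<nuw> u< {6,+,2}<nuw> holds on every iteration: both
  // recurrences advance in lockstep by the same step and neither wraps, so
  // comparing the starts (4 u< 6) settles the predicate.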
8418
8419   if (!ICmpInst::isRelational(Pred))
8420     return false;
8421
8422   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
8423   if (!LAR)
8424     return false;
8425   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
8426   if (!RAR)
8427     return false;
8428   if (LAR->getLoop() != RAR->getLoop())
8429     return false;
8430   if (!LAR->isAffine() || !RAR->isAffine())
8431     return false;
8432
8433   if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
8434     return false;
8435
8436   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
8437                          SCEV::FlagNSW : SCEV::FlagNUW;
8438   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
8439     return false;
8440
8441   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
8442 }
8443
8444 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
8445 /// expression?
8446 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
8447                                         ICmpInst::Predicate Pred,
8448                                         const SCEV *LHS, const SCEV *RHS) {
8449   switch (Pred) {
8450   default:
8451     return false;
8452
8453   case ICmpInst::ICMP_SGE:
8454     std::swap(LHS, RHS);
8455     LLVM_FALLTHROUGH;
8456   case ICmpInst::ICMP_SLE:
8457     return
8458         // min(A, ...) <= A
8459         IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
8460         // A <= max(A, ...)
8461         IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
8462
8463   case ICmpInst::ICMP_UGE:
8464     std::swap(LHS, RHS);
8465     LLVM_FALLTHROUGH;
8466   case ICmpInst::ICMP_ULE:
8467     return
8468         // min(A, ...) <= A
8469         IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
8470         // A <= max(A, ...)
8471         IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
8472   }
8473
8474   llvm_unreachable("covered switch fell through?!");
8475 }
8476
8477 bool
8478 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
8479                                              const SCEV *LHS, const SCEV *RHS,
8480                                              const SCEV *FoundLHS,
8481                                              const SCEV *FoundRHS) {
8482   auto IsKnownPredicateFull =
8483       [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
8484     return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
8485            IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
8486            IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
8487            isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
8488   };
8489
8490   switch (Pred) {
8491   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
8492   case ICmpInst::ICMP_EQ:
8493   case ICmpInst::ICMP_NE:
8494     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
8495       return true;
8496     break;
8497   case ICmpInst::ICMP_SLT:
8498   case ICmpInst::ICMP_SLE:
8499     if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
8500         IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS))
8501       return true;
8502     break;
8503   case ICmpInst::ICMP_SGT:
8504   case ICmpInst::ICMP_SGE:
8505     if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
8506         IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS))
8507       return true;
8508     break;
8509   case ICmpInst::ICMP_ULT:
8510   case ICmpInst::ICMP_ULE:
8511     if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
8512         IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS))
8513       return true;
8514     break;
8515   case ICmpInst::ICMP_UGT:
8516   case ICmpInst::ICMP_UGE:
8517     if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
8518         IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS))
8519       return true;
8520     break;
8521   }
8522
8523   return false;
8524 }
8525
8526 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
8527                                                      const SCEV *LHS,
8528                                                      const SCEV *RHS,
8529                                                      const SCEV *FoundLHS,
8530                                                      const SCEV *FoundRHS) {
8531   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
8532     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
8533     // reduce the compile time impact of this optimization.
8534     return false;
8535
8536   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
8537   if (!Addend)
8538     return false;
8539
8540   APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
8541
8542   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
8543   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
8544   ConstantRange FoundLHSRange =
8545       ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
8546
8547   // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
8548   ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
8549
8550   // We can also compute the range of values for `LHS` that satisfy the
8551   // consequent, "`LHS` `Pred` `RHS`":
8552   APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
8553   ConstantRange SatisfyingLHSRange =
8554       ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
8555
8556   // The antecedent implies the consequent if every value of `LHS` that
8557   // satisfies the antecedent also satisfies the consequent.
8558   return SatisfyingLHSRange.contains(LHSRange);
8559 }
8560
8561 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
8562                                          bool IsSigned, bool NoWrap) {
8563   assert(isKnownPositive(Stride) && "Positive stride expected!");
8564
8565   if (NoWrap) return false;
8566
8567   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8568   const SCEV *One = getOne(Stride->getType());
8569
8570   if (IsSigned) {
8571     APInt MaxRHS = getSignedRange(RHS).getSignedMax();
8572     APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
8573     APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
8574                                 .getSignedMax();
8575
8576     // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
8577     return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
8578   }
8579
8580   APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
8581   APInt MaxValue = APInt::getMaxValue(BitWidth);
8582   APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
8583                               .getUnsignedMax();
8584
8585   // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
8586   return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
8587 }
8588
8589 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
8590                                          bool IsSigned, bool NoWrap) {
8591   if (NoWrap) return false;
8592
8593   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8594   const SCEV *One = getOne(Stride->getType());
8595
8596   if (IsSigned) {
8597     APInt MinRHS = getSignedRange(RHS).getSignedMin();
8598     APInt MinValue = APInt::getSignedMinValue(BitWidth);
8599     APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
8600                                 .getSignedMax();
8601
8602     // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
8603     return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
8604   }
8605
8606   APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
8607   APInt MinValue = APInt::getMinValue(BitWidth);
8608   APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
8609                               .getUnsignedMax();
8610
8611   // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
8612   return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
8613 }
8614
8615 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
8616                                             bool Equality) {
8617   const SCEV *One = getOne(Step->getType());
8618   Delta = Equality ? getAddExpr(Delta, Step)
8619                    : getAddExpr(Delta, getMinusSCEV(Step, One));
8620   return getUDivExpr(Delta, Step);
8621 }
8622
8623 ScalarEvolution::ExitLimit
8624 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
8625                                   const Loop *L, bool IsSigned,
8626                                   bool ControlsExit, bool AllowPredicates) {
8627   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8628   // We handle only IV < Invariant.
8629   if (!isLoopInvariant(RHS, L))
8630     return getCouldNotCompute();
8631
8632   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
8633   bool PredicatedIV = false;
8634
8635   if (!IV && AllowPredicates) {
8636     // Try to make this an AddRec using runtime tests, in the first X
8637     // iterations of this loop, where X is the SCEV expression found by the
8638     // algorithm below.
8639     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
8640     PredicatedIV = true;
8641   }
8642
8643   // Avoid weird loops.
8644   if (!IV || IV->getLoop() != L || !IV->isAffine())
8645     return getCouldNotCompute();
8646
8647   bool NoWrap = ControlsExit &&
8648                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
8649
8650   const SCEV *Stride = IV->getStepRecurrence(*this);
8651
8652   bool PositiveStride = isKnownPositive(Stride);
8653
8654   // Avoid negative or zero stride values.
8655   if (!PositiveStride) {
8656     // We can compute the correct backedge taken count for loops with unknown
8657     // strides if we can prove that the loop is not an infinite loop with side
8658     // effects. Here's the loop structure we are trying to handle -
8659     //
8660     // i = start
8661     // do {
8662     //   A[i] = i;
8663     //   i += s;
8664     // } while (i < end);
8665     //
8666     // The backedge taken count for such loops is evaluated as -
8667     // (max(end, start + stride) - start - 1) /u stride
8668     //
8669     // The additional preconditions that we need to check to prove correctness
8670     // of the above formula are as follows -
8671     //
8672     // a) IV is either nuw or nsw depending upon signedness (indicated by the
8673     //    NoWrap flag).
8674     // b) the loop has a single exit and no side effects.
8675     //
8676     //
8677     // Precondition a) implies that if the stride is negative, this is a single
8678     // trip loop. The backedge taken count formula reduces to zero in this case.
8679     //
8680     // Precondition b) implies that the unknown stride cannot be zero, since
8681     // otherwise we would have UB.
8682     //
8683     // The positive stride case is the same as isKnownPositive(Stride) returning
8684     // true (original behavior of the function).
8685     //
8686     // We want to make sure that the stride is truly unknown as there are edge
8687     // cases where ScalarEvolution propagates no wrap flags to the
8688     // post-increment/decrement IV even though the increment/decrement operation
8689     // itself is wrapping. The computed backedge taken count may be wrong in
8690     // such cases. This is prevented by checking that the stride is not known to
8691     // be either positive or non-positive. For example, no wrap flags are
8692     // propagated to the post-increment IV of this loop with a trip count of 2 -
8693     //
8694     // unsigned char i;
8695     // for(i=127; i<128; i+=129)
8696     //   A[i] = i;
8697     //
8698     if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
8699         !loopHasNoSideEffects(L))
8700       return getCouldNotCompute();
8701
8702   } else if (!Stride->isOne() &&
8703              doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
8704     // Avoid proven overflow cases: this will ensure that the backedge taken
8705     // count will not generate any unsigned overflow. Relaxed no-overflow
8706     // conditions exploit NoWrapFlags, allowing optimization even in the
8707     // presence of source-level undefined behavior, as in C.
8708     return getCouldNotCompute();
8709
8710   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
8711                                       : ICmpInst::ICMP_ULT;
8712   const SCEV *Start = IV->getStart();
8713   const SCEV *End = RHS;
8714   // If the backedge is taken at least once, then it will be taken
8715   // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
8716   // is the LHS value of the less-than comparison the first time it is evaluated
8717   // and End is the RHS.
8718   const SCEV *BECountIfBackedgeTaken =
8719       computeBECount(getMinusSCEV(End, Start), Stride, false);
8720   // If the loop entry is guarded by the result of the backedge test of the
8721   // first loop iteration, then we know the backedge will be taken at least
8722   // once and so the backedge taken count is as above. If not then we use the
8723   // expression (max(End,Start)-Start)/Stride to describe the backedge count,
8724   // as, if the backedge is taken at least once, max(End,Start) is End, and so
8725   // the result is as above; if not, max(End,Start) is Start, so we get a
8726   // backedge count of zero.
8727   const SCEV *BECount;
8728   if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
8729     BECount = BECountIfBackedgeTaken;
8730   else {
8731     End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
8732     BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
8733   }
8734
8735   const SCEV *MaxBECount;
8736   bool MaxOrZero = false;
8737   if (isa<SCEVConstant>(BECount))
8738     MaxBECount = BECount;
8739   else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
8740     // If we know exactly how many times the backedge will be taken if it's
8741     // taken at least once, then the backedge count will either be that or
8742     // zero.
8743     MaxBECount = BECountIfBackedgeTaken;
8744     MaxOrZero = true;
8745   } else {
8746     // Calculate the maximum backedge count based on the range of values
8747     // permitted by Start, End, and Stride.
8748     APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
8749                               : getUnsignedRange(Start).getUnsignedMin();
8750
8751     unsigned BitWidth = getTypeSizeInBits(LHS->getType());
8752
8753     APInt StrideForMaxBECount;
8754
8755     if (PositiveStride)
8756       StrideForMaxBECount =
8757           IsSigned ? getSignedRange(Stride).getSignedMin()
8758                    : getUnsignedRange(Stride).getUnsignedMin();
8759     else
8760       // Using a stride of 1 is safe when computing max backedge taken count for
8761       // a loop with unknown stride.
8762       StrideForMaxBECount = APInt(BitWidth, 1, IsSigned);
8763
8764     APInt Limit =
8765         IsSigned ? APInt::getSignedMaxValue(BitWidth) - (StrideForMaxBECount - 1)
8766                  : APInt::getMaxValue(BitWidth) - (StrideForMaxBECount - 1);
8767
8768     // Although End can be a MAX expression, we estimate MaxEnd considering only
8769     // the case End = RHS. This is safe because in the other case (End - Start)
8770     // is zero, leading to a zero maximum backedge taken count.
8771     APInt MaxEnd =
8772         IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
8773                  : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
8774
8775     MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
8776                                 getConstant(StrideForMaxBECount), false);
8777   }
8778
8779   if (isa<SCEVCouldNotCompute>(MaxBECount))
8780     MaxBECount = BECount;
8781
8782   return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
8783 }
8784
8785 ScalarEvolution::ExitLimit
8786 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
8787                                      const Loop *L, bool IsSigned,
8788                                      bool ControlsExit, bool AllowPredicates) {
8789   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8790   // We handle only IV > Invariant.
8791   if (!isLoopInvariant(RHS, L))
8792     return getCouldNotCompute();
8793
8794   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
8795   if (!IV && AllowPredicates)
8796     // Try to make this an AddRec using runtime tests, in the first X
8797     // iterations of this loop, where X is the SCEV expression found by the
8798     // algorithm below.
8799     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
8800
8801   // Avoid weird loops.
8802   if (!IV || IV->getLoop() != L || !IV->isAffine())
8803     return getCouldNotCompute();
8804
8805   bool NoWrap = ControlsExit &&
8806                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
8807
8808   const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
8809
8810   // Avoid negative or zero stride values.
8811   if (!isKnownPositive(Stride))
8812     return getCouldNotCompute();
8813
8814   // Avoid proven overflow cases: this will ensure that the backedge taken
8815   // count will not generate any unsigned overflow. Relaxed no-overflow
8816   // conditions exploit NoWrapFlags, allowing optimization even in the
8817   // presence of source-level undefined behavior, as in C.
8818   if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
8819     return getCouldNotCompute();
8820
8821   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
8822                                       : ICmpInst::ICMP_UGT;
8823
8824   const SCEV *Start = IV->getStart();
8825   const SCEV *End = RHS;
8826   if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
8827     End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
8828
8829   const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
8830
8831   APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
8832                             : getUnsignedRange(Start).getUnsignedMax();
8833
8834   APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
8835                              : getUnsignedRange(Stride).getUnsignedMin();
8836
8837   unsigned BitWidth = getTypeSizeInBits(LHS->getType());
8838   APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
8839                          : APInt::getMinValue(BitWidth) + (MinStride - 1);
8840
8841   // Although End can be a MIN expression, we estimate MinEnd considering only
8842   // the case End = RHS. This is safe because in the other case (Start - End)
8843   // is zero, leading to a zero maximum backedge taken count.
8844   APInt MinEnd =
8845       IsSigned ?
APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit) 8846 : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit); 8847 8848 8849 const SCEV *MaxBECount = getCouldNotCompute(); 8850 if (isa<SCEVConstant>(BECount)) 8851 MaxBECount = BECount; 8852 else 8853 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 8854 getConstant(MinStride), false); 8855 8856 if (isa<SCEVCouldNotCompute>(MaxBECount)) 8857 MaxBECount = BECount; 8858 8859 return ExitLimit(BECount, MaxBECount, false, Predicates); 8860 } 8861 8862 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 8863 ScalarEvolution &SE) const { 8864 if (Range.isFullSet()) // Infinite loop. 8865 return SE.getCouldNotCompute(); 8866 8867 // If the start is a non-zero constant, shift the range to simplify things. 8868 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 8869 if (!SC->getValue()->isZero()) { 8870 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 8871 Operands[0] = SE.getZero(SC->getType()); 8872 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 8873 getNoWrapFlags(FlagNW)); 8874 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 8875 return ShiftedAddRec->getNumIterationsInRange( 8876 Range.subtract(SC->getAPInt()), SE); 8877 // This is strange and shouldn't happen. 8878 return SE.getCouldNotCompute(); 8879 } 8880 8881 // The only time we can solve this is when we have all constant indices. 8882 // Otherwise, we cannot determine the overflow conditions. 8883 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 8884 return SE.getCouldNotCompute(); 8885 8886 // Okay at this point we know that all elements of the chrec are constants and 8887 // that the start element is zero. 8888 8889 // First check to see if the range contains zero. If not, the first 8890 // iteration exits. 8891 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 8892 if (!Range.contains(APInt(BitWidth, 0))) 8893 return SE.getZero(getType()); 8894 8895 if (isAffine()) { 8896 // If this is an affine expression then we have this situation: 8897 // Solve {0,+,A} in Range === Ax in Range 8898 8899 // We know that zero is in the range. If A is positive then we know that 8900 // the upper value of the range must be the first possible exit value. 8901 // If A is negative then the lower of the range is the last possible loop 8902 // value. Also note that we already checked for a full range. 8903 APInt One(BitWidth,1); 8904 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 8905 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 8906 8907 // The exit value should be (End+A)/A. 8908 APInt ExitVal = (End + A).udiv(A); 8909 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 8910 8911 // Evaluate at the exit value. If we really did fall out of the valid 8912 // range, then we computed our trip count, otherwise wrap around or other 8913 // things must have happened. 8914 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 8915 if (Range.contains(Val->getValue())) 8916 return SE.getCouldNotCompute(); // Something strange happened 8917 8918 // Ensure that the previous value is in the range. This is a sanity check. 
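    // For example, for {0,+,3} and Range = [0, 10): A = 3, End = 9, and
    // ExitVal = (9 + 3) /u 3 = 4. The chrec evaluates to 12 at iteration 4
    // (outside the range) and to 9 at iteration 3 (still inside), so the
    // computed count is 4.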
8919     assert(Range.contains(
8920              EvaluateConstantChrecAtConstant(this,
8921                  ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
8922            "Linear scev computation is off in a bad way!");
8923     return SE.getConstant(ExitValue);
8924   } else if (isQuadratic()) {
8925     // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
8926     // quadratic equation to solve it. To do this, we must frame our problem in
8927     // terms of figuring out when zero is crossed, instead of when
8928     // Range.getUpper() is crossed.
8929     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
8930     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
8931     const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap);
8932
8933     // Next, solve the constructed addrec.
8934     if (auto Roots =
8935             SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) {
8936       const SCEVConstant *R1 = Roots->first;
8937       const SCEVConstant *R2 = Roots->second;
8938       // Pick the smallest positive root value.
8939       if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
8940               ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
8941         if (!CB->getZExtValue())
8942           std::swap(R1, R2); // R1 is the minimum root now.
8943
8944         // Make sure the root is not off by one. The returned iteration should
8945         // not be in the range, but the previous one should be. When solving
8946         // for "X*X < 5", for example, we should not return a root of 2.
8947         ConstantInt *R1Val =
8948             EvaluateConstantChrecAtConstant(this, R1->getValue(), SE);
8949         if (Range.contains(R1Val->getValue())) {
8950           // The next iteration must be out of the range...
8951           ConstantInt *NextVal =
8952               ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);
8953
8954           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
8955           if (!Range.contains(R1Val->getValue()))
8956             return SE.getConstant(NextVal);
8957           return SE.getCouldNotCompute(); // Something strange happened
8958         }
8959
8960         // If R1 was not in the range, then it is a good return value. Make
8961         // sure that R1-1 WAS in the range though, just in case.
8962         ConstantInt *NextVal =
8963             ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
8964         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
8965         if (Range.contains(R1Val->getValue()))
8966           return R1;
8967         return SE.getCouldNotCompute(); // Something strange happened
8968       }
8969     }
8970   }
8971
8972   return SE.getCouldNotCompute();
8973 }
8974
8975 // Return true when S contains at least one undef value.
8976 static inline bool containsUndefs(const SCEV *S) {
8977   return SCEVExprContains(S, [](const SCEV *S) {
8978     if (const auto *SU = dyn_cast<SCEVUnknown>(S))
8979       return isa<UndefValue>(SU->getValue());
8980     else if (const auto *SC = dyn_cast<SCEVConstant>(S))
8981       return isa<UndefValue>(SC->getValue());
8982     return false;
8983   });
8984 }
8985
8986 namespace {
8987 // Collect all steps of SCEV expressions.
8988 struct SCEVCollectStrides {
8989   ScalarEvolution &SE;
8990   SmallVectorImpl<const SCEV *> &Strides;
8991
8992   SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
8993       : SE(SE), Strides(S) {}
8994
8995   bool follow(const SCEV *S) {
8996     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
8997       Strides.push_back(AR->getStepRecurrence(SE));
8998     return true;
8999   }
9000   bool isDone() const { return false; }
9001 };
9002
9003 // Collect all SCEVUnknown and SCEVMulExpr expressions.
9004 struct SCEVCollectTerms {
9005   SmallVectorImpl<const SCEV *> &Terms;
9006
9007   SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
9008       : Terms(T) {}
9009
9010   bool follow(const SCEV *S) {
9011     if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
9012         isa<SCEVSignExtendExpr>(S)) {
9013       if (!containsUndefs(S))
9014         Terms.push_back(S);
9015
9016       // Stop recursion: once we collected a term, do not walk its operands.
9017       return false;
9018     }
9019
9020     // Keep looking.
9021     return true;
9022   }
9023   bool isDone() const { return false; }
9024 };
9025
9026 // Check if a SCEV contains an AddRecExpr.
9027 struct SCEVHasAddRec {
9028   bool &ContainsAddRec;
9029
9030   SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
9031     ContainsAddRec = false;
9032   }
9033
9034   bool follow(const SCEV *S) {
9035     if (isa<SCEVAddRecExpr>(S)) {
9036       ContainsAddRec = true;
9037
9038       // Stop recursion: we have found an AddRec, no need to walk its operands.
9039       return false;
9040     }
9041
9042     // Keep looking.
9043     return true;
9044   }
9045   bool isDone() const { return false; }
9046 };
9047
9048 // Find factors that are multiplied with an expression that (possibly as a
9049 // subexpression) contains an AddRecExpr. In the expression:
9050 //
9051 //  8 * (100 +  %p * %q * (%a + {0, +, 1}_loop))
9052 //
9053 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
9054 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
9055 // parameters as they form a product with an induction variable.
9056 //
9057 // This collector expects all array size parameters to be in the same MulExpr.
9058 // It might be necessary to later add support for collecting parameters that are
9059 // spread over different nested MulExpr.
9060 struct SCEVCollectAddRecMultiplies {
9061   SmallVectorImpl<const SCEV *> &Terms;
9062   ScalarEvolution &SE;
9063
9064   SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
9065       : Terms(T), SE(SE) {}
9066
9067   bool follow(const SCEV *S) {
9068     if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
9069       bool HasAddRec = false;
9070       SmallVector<const SCEV *, 0> Operands;
9071       for (auto Op : Mul->operands()) {
9072         if (isa<SCEVUnknown>(Op)) {
9073           Operands.push_back(Op);
9074         } else {
9075           bool ContainsAddRec;
9076           SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
9077           visitAll(Op, ContainsAddRecVisitor);
9078           HasAddRec |= ContainsAddRec;
9079         }
9080       }
9081       if (Operands.empty())
9082         return true;
9083
9084       if (!HasAddRec)
9085         return false;
9086
9087       Terms.push_back(SE.getMulExpr(Operands));
9088       // Stop recursion: once we collected a term, do not walk its operands.
9089       return false;
9090     }
9091
9092     // Keep looking.
9093     return true;
9094   }
9095   bool isDone() const { return false; }
9096 };
9097 }
9098
9099 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
9100 /// two places:
9101 ///   1) The strides of AddRec expressions.
9102 ///   2) Unknowns that are multiplied with AddRec expressions.
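/// For example, for the expression 8 * (100 + %p * %q * (%a + {0,+,1}_loop))
/// shown above, place (1) visits the AddRec's stride (the constant 1, which
/// is discarded), while place (2) collects %p * %q, the factors multiplying
/// the AddRec-containing subexpression.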
9103 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 9104 SmallVectorImpl<const SCEV *> &Terms) { 9105 SmallVector<const SCEV *, 4> Strides; 9106 SCEVCollectStrides StrideCollector(*this, Strides); 9107 visitAll(Expr, StrideCollector); 9108 9109 DEBUG({ 9110 dbgs() << "Strides:\n"; 9111 for (const SCEV *S : Strides) 9112 dbgs() << *S << "\n"; 9113 }); 9114 9115 for (const SCEV *S : Strides) { 9116 SCEVCollectTerms TermCollector(Terms); 9117 visitAll(S, TermCollector); 9118 } 9119 9120 DEBUG({ 9121 dbgs() << "Terms:\n"; 9122 for (const SCEV *T : Terms) 9123 dbgs() << *T << "\n"; 9124 }); 9125 9126 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 9127 visitAll(Expr, MulCollector); 9128 } 9129 9130 static bool findArrayDimensionsRec(ScalarEvolution &SE, 9131 SmallVectorImpl<const SCEV *> &Terms, 9132 SmallVectorImpl<const SCEV *> &Sizes) { 9133 int Last = Terms.size() - 1; 9134 const SCEV *Step = Terms[Last]; 9135 9136 // End of recursion. 9137 if (Last == 0) { 9138 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 9139 SmallVector<const SCEV *, 2> Qs; 9140 for (const SCEV *Op : M->operands()) 9141 if (!isa<SCEVConstant>(Op)) 9142 Qs.push_back(Op); 9143 9144 Step = SE.getMulExpr(Qs); 9145 } 9146 9147 Sizes.push_back(Step); 9148 return true; 9149 } 9150 9151 for (const SCEV *&Term : Terms) { 9152 // Normalize the terms before the next call to findArrayDimensionsRec. 9153 const SCEV *Q, *R; 9154 SCEVDivision::divide(SE, Term, Step, &Q, &R); 9155 9156 // Bail out when GCD does not evenly divide one of the terms. 9157 if (!R->isZero()) 9158 return false; 9159 9160 Term = Q; 9161 } 9162 9163 // Remove all SCEVConstants. 9164 Terms.erase( 9165 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 9166 Terms.end()); 9167 9168 if (Terms.size() > 0) 9169 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 9170 return false; 9171 9172 Sizes.push_back(Step); 9173 return true; 9174 } 9175 9176 9177 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 9178 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 9179 for (const SCEV *T : Terms) 9180 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 9181 return true; 9182 return false; 9183 } 9184 9185 // Return the number of product terms in S. 9186 static inline int numberOfTerms(const SCEV *S) { 9187 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 9188 return Expr->getNumOperands(); 9189 return 1; 9190 } 9191 9192 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 9193 if (isa<SCEVConstant>(T)) 9194 return nullptr; 9195 9196 if (isa<SCEVUnknown>(T)) 9197 return T; 9198 9199 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 9200 SmallVector<const SCEV *, 2> Factors; 9201 for (const SCEV *Op : M->operands()) 9202 if (!isa<SCEVConstant>(Op)) 9203 Factors.push_back(Op); 9204 9205 return SE.getMulExpr(Factors); 9206 } 9207 9208 return T; 9209 } 9210 9211 /// Return the size of an element read or written by Inst. 
9212 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
9213   Type *Ty;
9214   if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
9215     Ty = Store->getValueOperand()->getType();
9216   else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
9217     Ty = Load->getType();
9218   else
9219     return nullptr;
9220
9221   Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
9222   return getSizeOfExpr(ETy, Ty);
9223 }
9224
9225 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
9226                                           SmallVectorImpl<const SCEV *> &Sizes,
9227                                           const SCEV *ElementSize) const {
9228   if (Terms.size() < 1 || !ElementSize)
9229     return;
9230
9231   // Early return when Terms do not contain parameters: we do not delinearize
9232   // non-parametric SCEVs.
9233   if (!containsParameters(Terms))
9234     return;
9235
9236   DEBUG({
9237     dbgs() << "Terms:\n";
9238     for (const SCEV *T : Terms)
9239       dbgs() << *T << "\n";
9240   });
9241
9242   // Remove duplicates.
9243   std::sort(Terms.begin(), Terms.end());
9244   Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
9245
9246   // Put larger terms first.
9247   std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
9248     return numberOfTerms(LHS) > numberOfTerms(RHS);
9249   });
9250
9251   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
9252
9253   // Try to divide all terms by the element size. If a term is not divisible
9254   // by the element size, proceed with the original term.
9255   for (const SCEV *&Term : Terms) {
9256     const SCEV *Q, *R;
9257     SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
9258     if (!Q->isZero())
9259       Term = Q;
9260   }
9261
9262   SmallVector<const SCEV *, 4> NewTerms;
9263
9264   // Remove constant factors.
9265   for (const SCEV *T : Terms)
9266     if (const SCEV *NewT = removeConstantFactors(SE, T))
9267       NewTerms.push_back(NewT);
9268
9269   DEBUG({
9270     dbgs() << "Terms after sorting and removing constant factors:\n";
9271     for (const SCEV *T : NewTerms)
9272       dbgs() << *T << "\n";
9273   });
9274
9275   if (NewTerms.empty() ||
9276       !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
9277     Sizes.clear();
9278     return;
9279   }
9280
9281   // The last element to be pushed into Sizes is the size of an element.
9282   Sizes.push_back(ElementSize);
9283
9284   DEBUG({
9285     dbgs() << "Sizes:\n";
9286     for (const SCEV *S : Sizes)
9287       dbgs() << *S << "\n";
9288   });
9289 }
9290
9291 void ScalarEvolution::computeAccessFunctions(
9292     const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
9293     SmallVectorImpl<const SCEV *> &Sizes) {
9294
9295   // Early exit in case this SCEV is not an affine multivariate function.
9296   if (Sizes.empty())
9297     return;
9298
9299   if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
9300     if (!AR->isAffine())
9301       return;
9302
9303   const SCEV *Res = Expr;
9304   int Last = Sizes.size() - 1;
9305   for (int i = Last; i >= 0; i--) {
9306     const SCEV *Q, *R;
9307     SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
9308
9309     DEBUG({
9310       dbgs() << "Res: " << *Res << "\n";
9311       dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
9312       dbgs() << "Res divided by Sizes[i]:\n";
9313       dbgs() << "Quotient: " << *Q << "\n";
9314       dbgs() << "Remainder: " << *R << "\n";
9315     });
9316
9317     Res = Q;
9318
9319     // Do not record the last subscript corresponding to the size of elements
9320     // in the array.
9321     if (i == Last) {
9322
9323       // Bail out if the remainder is too complex.
9324       if (isa<SCEVAddRecExpr>(R)) {
9325         Subscripts.clear();
9326         Sizes.clear();
9327         return;
9328       }
9329
9330       continue;
9331     }
9332
9333     // Record the access function for the current subscript.
9334     Subscripts.push_back(R);
9335   }
9336
9337   // Also push in last position the quotient of the last division: after the
9338   // reversal below, it becomes the access function of the outermost dimension.
9339   Subscripts.push_back(Res);
9340
9341   std::reverse(Subscripts.begin(), Subscripts.end());
9342
9343   DEBUG({
9344     dbgs() << "Subscripts:\n";
9345     for (const SCEV *S : Subscripts)
9346       dbgs() << *S << "\n";
9347   });
9348 }
9349
9350 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
9351 /// sizes of an array access; the remainder of the delinearization is the base
9352 /// offset of the array. The SCEV->delinearize algorithm computes the multiples
9353 /// of SCEV coefficients: that is a pattern matching of subexpressions in the
9354 /// stride and base of a SCEV corresponding to the computation of a GCD
9355 /// (greatest common divisor) of base and stride. When SCEV->delinearize
9356 /// fails, it leaves the Subscripts and Sizes vectors empty.
9357 ///
9358 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
9359 ///
9360 ///   void foo(long n, long m, long o, double A[n][m][o]) {
9361 ///
9362 ///     for (long i = 0; i < n; i++)
9363 ///       for (long j = 0; j < m; j++)
9364 ///         for (long k = 0; k < o; k++)
9365 ///           A[i][j][k] = 1.0;
9366 ///   }
9367 ///
9368 /// the delinearization input is the following AddRec SCEV:
9369 ///
9370 ///   AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
9371 ///
9372 /// From this SCEV, we are able to say that the base offset of the access is %A
9373 /// because it appears as an offset that does not divide any of the strides in
9374 /// the loops:
9375 ///
9376 ///   CHECK: Base offset: %A
9377 ///
9378 /// and then SCEV->delinearize determines the size of some of the dimensions of
9379 /// the array as these are the multiples by which the strides are happening:
9380 ///
9381 ///   CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
9382 ///
9383 /// Note that the outermost dimension remains of UnknownSize because there are
9384 /// no strides that would help identify the size of that dimension: when the
9385 /// array has been statically allocated, one could compute it by dividing the
9386 /// overall size of the array by the size of the known dimensions:
9387 /// %m * %o * 8.
9388 ///
9389 /// Finally, delinearize provides the access functions for the array reference
9390 /// that corresponds to A[i][j][k] of the above C testcase:
9391 ///
9392 ///   CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
9393 ///
9394 /// The testcases check the output of a function pass, DelinearizationPass,
9395 /// which walks through all loads and stores of a function asking for the
9396 /// SCEV of each memory access with respect to all enclosing loops, calling
9397 /// SCEV->delinearize on that and printing the results.
9398
9399 void ScalarEvolution::delinearize(const SCEV *Expr,
9400                                   SmallVectorImpl<const SCEV *> &Subscripts,
9401                                   SmallVectorImpl<const SCEV *> &Sizes,
9402                                   const SCEV *ElementSize) {
9403   // First step: collect parametric terms.
9404   SmallVector<const SCEV *, 4> Terms;
9405   collectParametricTerms(Expr, Terms);
9406
9407   if (Terms.empty())
9408     return;
9409
9410   // Second step: find subscript sizes.
9411   findArrayDimensions(Terms, Sizes, ElementSize);
9412
9413   if (Sizes.empty())
9414     return;
9415
9416   // Third step: compute the access functions for each subscript.
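  // For the A[i][j][k] example documented above, this step divides the
  // expression successively by the element size 8 and the sizes %o and %m;
  // the remainders of the divisions by %o and %m become the access functions
  // of the two inner dimensions, and the final quotient becomes the access
  // function of the outermost dimension, yielding {0,+,1}<%for.i>,
  // {0,+,1}<%for.j> and {0,+,1}<%for.k>.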
9417 computeAccessFunctions(Expr, Subscripts, Sizes); 9418 9419 if (Subscripts.empty()) 9420 return; 9421 9422 DEBUG({ 9423 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 9424 dbgs() << "ArrayDecl[UnknownSize]"; 9425 for (const SCEV *S : Sizes) 9426 dbgs() << "[" << *S << "]"; 9427 9428 dbgs() << "\nArrayRef"; 9429 for (const SCEV *S : Subscripts) 9430 dbgs() << "[" << *S << "]"; 9431 dbgs() << "\n"; 9432 }); 9433 } 9434 9435 //===----------------------------------------------------------------------===// 9436 // SCEVCallbackVH Class Implementation 9437 //===----------------------------------------------------------------------===// 9438 9439 void ScalarEvolution::SCEVCallbackVH::deleted() { 9440 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9441 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 9442 SE->ConstantEvolutionLoopExitValue.erase(PN); 9443 SE->eraseValueFromMap(getValPtr()); 9444 // this now dangles! 9445 } 9446 9447 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 9448 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9449 9450 // Forget all the expressions associated with users of the old value, 9451 // so that future queries will recompute the expressions using the new 9452 // value. 9453 Value *Old = getValPtr(); 9454 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 9455 SmallPtrSet<User *, 8> Visited; 9456 while (!Worklist.empty()) { 9457 User *U = Worklist.pop_back_val(); 9458 // Deleting the Old value will cause this to dangle. Postpone 9459 // that until everything else is done. 9460 if (U == Old) 9461 continue; 9462 if (!Visited.insert(U).second) 9463 continue; 9464 if (PHINode *PN = dyn_cast<PHINode>(U)) 9465 SE->ConstantEvolutionLoopExitValue.erase(PN); 9466 SE->eraseValueFromMap(U); 9467 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 9468 } 9469 // Delete the Old value. 9470 if (PHINode *PN = dyn_cast<PHINode>(Old)) 9471 SE->ConstantEvolutionLoopExitValue.erase(PN); 9472 SE->eraseValueFromMap(Old); 9473 // this now dangles! 9474 } 9475 9476 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 9477 : CallbackVH(V), SE(se) {} 9478 9479 //===----------------------------------------------------------------------===// 9480 // ScalarEvolution Class Implementation 9481 //===----------------------------------------------------------------------===// 9482 9483 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 9484 AssumptionCache &AC, DominatorTree &DT, 9485 LoopInfo &LI) 9486 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 9487 CouldNotCompute(new SCEVCouldNotCompute()), 9488 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9489 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 9490 FirstUnknown(nullptr) { 9491 9492 // To use guards for proving predicates, we need to scan every instruction in 9493 // relevant basic blocks, and not just terminators. Doing this is a waste of 9494 // time if the IR does not actually contain any calls to 9495 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 9496 // 9497 // This pessimizes the case where a pass that preserves ScalarEvolution wants 9498 // to _add_ guards to the module when there weren't any before, and wants 9499 // ScalarEvolution to optimize based on those guards. For now we prefer to be 9500 // efficient in lieu of being smart in that rather obscure case. 
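  // (For reference, a guard is a call to the variadic intrinsic
  //    declare void @llvm.experimental.guard(i1, ...)
  //  which conventionally carries its deoptimization state in a "deopt"
  //  operand bundle.)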
9501 9502 auto *GuardDecl = F.getParent()->getFunction( 9503 Intrinsic::getName(Intrinsic::experimental_guard)); 9504 HasGuards = GuardDecl && !GuardDecl->use_empty(); 9505 } 9506 9507 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 9508 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 9509 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 9510 ValueExprMap(std::move(Arg.ValueExprMap)), 9511 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 9512 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9513 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 9514 PredicatedBackedgeTakenCounts( 9515 std::move(Arg.PredicatedBackedgeTakenCounts)), 9516 ConstantEvolutionLoopExitValue( 9517 std::move(Arg.ConstantEvolutionLoopExitValue)), 9518 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 9519 LoopDispositions(std::move(Arg.LoopDispositions)), 9520 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 9521 BlockDispositions(std::move(Arg.BlockDispositions)), 9522 UnsignedRanges(std::move(Arg.UnsignedRanges)), 9523 SignedRanges(std::move(Arg.SignedRanges)), 9524 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 9525 UniquePreds(std::move(Arg.UniquePreds)), 9526 SCEVAllocator(std::move(Arg.SCEVAllocator)), 9527 FirstUnknown(Arg.FirstUnknown) { 9528 Arg.FirstUnknown = nullptr; 9529 } 9530 9531 ScalarEvolution::~ScalarEvolution() { 9532 // Iterate through all the SCEVUnknown instances and call their 9533 // destructors, so that they release their references to their values. 9534 for (SCEVUnknown *U = FirstUnknown; U;) { 9535 SCEVUnknown *Tmp = U; 9536 U = U->Next; 9537 Tmp->~SCEVUnknown(); 9538 } 9539 FirstUnknown = nullptr; 9540 9541 ExprValueMap.clear(); 9542 ValueExprMap.clear(); 9543 HasRecMap.clear(); 9544 9545 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 9546 // that a loop had multiple computable exits. 9547 for (auto &BTCI : BackedgeTakenCounts) 9548 BTCI.second.clear(); 9549 for (auto &BTCI : PredicatedBackedgeTakenCounts) 9550 BTCI.second.clear(); 9551 9552 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 9553 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 9554 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 9555 } 9556 9557 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 9558 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 9559 } 9560 9561 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 9562 const Loop *L) { 9563 // Print all inner loops first 9564 for (Loop *I : *L) 9565 PrintLoopInfo(OS, SE, I); 9566 9567 OS << "Loop "; 9568 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9569 OS << ": "; 9570 9571 SmallVector<BasicBlock *, 8> ExitBlocks; 9572 L->getExitBlocks(ExitBlocks); 9573 if (ExitBlocks.size() != 1) 9574 OS << "<multiple exits> "; 9575 9576 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 9577 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 9578 } else { 9579 OS << "Unpredictable backedge-taken count. "; 9580 } 9581 9582 OS << "\n" 9583 "Loop "; 9584 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9585 OS << ": "; 9586 9587 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 9588 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 9589 if (SE->isBackedgeTakenCountMaxOrZero(L)) 9590 OS << ", actual taken count either this or zero."; 9591 } else { 9592 OS << "Unpredictable max backedge-taken count. 
"; 9593 } 9594 9595 OS << "\n" 9596 "Loop "; 9597 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9598 OS << ": "; 9599 9600 SCEVUnionPredicate Pred; 9601 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 9602 if (!isa<SCEVCouldNotCompute>(PBT)) { 9603 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 9604 OS << " Predicates:\n"; 9605 Pred.print(OS, 4); 9606 } else { 9607 OS << "Unpredictable predicated backedge-taken count. "; 9608 } 9609 OS << "\n"; 9610 } 9611 9612 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 9613 switch (LD) { 9614 case ScalarEvolution::LoopVariant: 9615 return "Variant"; 9616 case ScalarEvolution::LoopInvariant: 9617 return "Invariant"; 9618 case ScalarEvolution::LoopComputable: 9619 return "Computable"; 9620 } 9621 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 9622 } 9623 9624 void ScalarEvolution::print(raw_ostream &OS) const { 9625 // ScalarEvolution's implementation of the print method is to print 9626 // out SCEV values of all instructions that are interesting. Doing 9627 // this potentially causes it to create new SCEV objects though, 9628 // which technically conflicts with the const qualifier. This isn't 9629 // observable from outside the class though, so casting away the 9630 // const isn't dangerous. 9631 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9632 9633 OS << "Classifying expressions for: "; 9634 F.printAsOperand(OS, /*PrintType=*/false); 9635 OS << "\n"; 9636 for (Instruction &I : instructions(F)) 9637 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 9638 OS << I << '\n'; 9639 OS << " --> "; 9640 const SCEV *SV = SE.getSCEV(&I); 9641 SV->print(OS); 9642 if (!isa<SCEVCouldNotCompute>(SV)) { 9643 OS << " U: "; 9644 SE.getUnsignedRange(SV).print(OS); 9645 OS << " S: "; 9646 SE.getSignedRange(SV).print(OS); 9647 } 9648 9649 const Loop *L = LI.getLoopFor(I.getParent()); 9650 9651 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 9652 if (AtUse != SV) { 9653 OS << " --> "; 9654 AtUse->print(OS); 9655 if (!isa<SCEVCouldNotCompute>(AtUse)) { 9656 OS << " U: "; 9657 SE.getUnsignedRange(AtUse).print(OS); 9658 OS << " S: "; 9659 SE.getSignedRange(AtUse).print(OS); 9660 } 9661 } 9662 9663 if (L) { 9664 OS << "\t\t" "Exits: "; 9665 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 9666 if (!SE.isLoopInvariant(ExitValue, L)) { 9667 OS << "<<Unknown>>"; 9668 } else { 9669 OS << *ExitValue; 9670 } 9671 9672 bool First = true; 9673 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 9674 if (First) { 9675 OS << "\t\t" "LoopDispositions: { "; 9676 First = false; 9677 } else { 9678 OS << ", "; 9679 } 9680 9681 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9682 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 9683 } 9684 9685 for (auto *InnerL : depth_first(L)) { 9686 if (InnerL == L) 9687 continue; 9688 if (First) { 9689 OS << "\t\t" "LoopDispositions: { "; 9690 First = false; 9691 } else { 9692 OS << ", "; 9693 } 9694 9695 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9696 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 9697 } 9698 9699 OS << " }"; 9700 } 9701 9702 OS << "\n"; 9703 } 9704 9705 OS << "Determining loop execution counts for: "; 9706 F.printAsOperand(OS, /*PrintType=*/false); 9707 OS << "\n"; 9708 for (Loop *I : LI) 9709 PrintLoopInfo(OS, &SE, I); 9710 } 9711 9712 ScalarEvolution::LoopDisposition 9713 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 
9714 auto &Values = LoopDispositions[S]; 9715 for (auto &V : Values) { 9716 if (V.getPointer() == L) 9717 return V.getInt(); 9718 } 9719 Values.emplace_back(L, LoopVariant); 9720 LoopDisposition D = computeLoopDisposition(S, L); 9721 auto &Values2 = LoopDispositions[S]; 9722 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9723 if (V.getPointer() == L) { 9724 V.setInt(D); 9725 break; 9726 } 9727 } 9728 return D; 9729 } 9730 9731 ScalarEvolution::LoopDisposition 9732 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 9733 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9734 case scConstant: 9735 return LoopInvariant; 9736 case scTruncate: 9737 case scZeroExtend: 9738 case scSignExtend: 9739 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 9740 case scAddRecExpr: { 9741 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9742 9743 // If L is the addrec's loop, it's computable. 9744 if (AR->getLoop() == L) 9745 return LoopComputable; 9746 9747 // Add recurrences are never invariant in the function-body (null loop). 9748 if (!L) 9749 return LoopVariant; 9750 9751 // This recurrence is variant w.r.t. L if L contains AR's loop. 9752 if (L->contains(AR->getLoop())) 9753 return LoopVariant; 9754 9755 // This recurrence is invariant w.r.t. L if AR's loop contains L. 9756 if (AR->getLoop()->contains(L)) 9757 return LoopInvariant; 9758 9759 // This recurrence is variant w.r.t. L if any of its operands 9760 // are variant. 9761 for (auto *Op : AR->operands()) 9762 if (!isLoopInvariant(Op, L)) 9763 return LoopVariant; 9764 9765 // Otherwise it's loop-invariant. 9766 return LoopInvariant; 9767 } 9768 case scAddExpr: 9769 case scMulExpr: 9770 case scUMaxExpr: 9771 case scSMaxExpr: { 9772 bool HasVarying = false; 9773 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 9774 LoopDisposition D = getLoopDisposition(Op, L); 9775 if (D == LoopVariant) 9776 return LoopVariant; 9777 if (D == LoopComputable) 9778 HasVarying = true; 9779 } 9780 return HasVarying ? LoopComputable : LoopInvariant; 9781 } 9782 case scUDivExpr: { 9783 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9784 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 9785 if (LD == LoopVariant) 9786 return LoopVariant; 9787 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 9788 if (RD == LoopVariant) 9789 return LoopVariant; 9790 return (LD == LoopInvariant && RD == LoopInvariant) ? 9791 LoopInvariant : LoopComputable; 9792 } 9793 case scUnknown: 9794 // All non-instruction values are loop invariant. All instructions are loop 9795 // invariant if they are not contained in the specified loop. 9796 // Instructions are never considered invariant in the function body 9797 // (null loop) because they are defined within the "loop". 9798 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 9799 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 9800 return LoopInvariant; 9801 case scCouldNotCompute: 9802 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9803 } 9804 llvm_unreachable("Unknown SCEV kind!"); 9805 } 9806 9807 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 9808 return getLoopDisposition(S, L) == LoopInvariant; 9809 } 9810 9811 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 9812 return getLoopDisposition(S, L) == LoopComputable; 9813 } 9814 9815 ScalarEvolution::BlockDisposition 9816 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9817 auto &Values = BlockDispositions[S]; 9818 for (auto &V : Values) { 9819 if (V.getPointer() == BB) 9820 return V.getInt(); 9821 } 9822 Values.emplace_back(BB, DoesNotDominateBlock); 9823 BlockDisposition D = computeBlockDisposition(S, BB); 9824 auto &Values2 = BlockDispositions[S]; 9825 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9826 if (V.getPointer() == BB) { 9827 V.setInt(D); 9828 break; 9829 } 9830 } 9831 return D; 9832 } 9833 9834 ScalarEvolution::BlockDisposition 9835 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9836 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9837 case scConstant: 9838 return ProperlyDominatesBlock; 9839 case scTruncate: 9840 case scZeroExtend: 9841 case scSignExtend: 9842 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 9843 case scAddRecExpr: { 9844 // This uses a "dominates" query instead of "properly dominates" query 9845 // to test for proper dominance too, because the instruction which 9846 // produces the addrec's value is a PHI, and a PHI effectively properly 9847 // dominates its entire containing block. 9848 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9849 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 9850 return DoesNotDominateBlock; 9851 9852 // Fall through into SCEVNAryExpr handling. 9853 LLVM_FALLTHROUGH; 9854 } 9855 case scAddExpr: 9856 case scMulExpr: 9857 case scUMaxExpr: 9858 case scSMaxExpr: { 9859 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 9860 bool Proper = true; 9861 for (const SCEV *NAryOp : NAry->operands()) { 9862 BlockDisposition D = getBlockDisposition(NAryOp, BB); 9863 if (D == DoesNotDominateBlock) 9864 return DoesNotDominateBlock; 9865 if (D == DominatesBlock) 9866 Proper = false; 9867 } 9868 return Proper ? ProperlyDominatesBlock : DominatesBlock; 9869 } 9870 case scUDivExpr: { 9871 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9872 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 9873 BlockDisposition LD = getBlockDisposition(LHS, BB); 9874 if (LD == DoesNotDominateBlock) 9875 return DoesNotDominateBlock; 9876 BlockDisposition RD = getBlockDisposition(RHS, BB); 9877 if (RD == DoesNotDominateBlock) 9878 return DoesNotDominateBlock; 9879 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
9880 ProperlyDominatesBlock : DominatesBlock; 9881 } 9882 case scUnknown: 9883 if (Instruction *I = 9884 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 9885 if (I->getParent() == BB) 9886 return DominatesBlock; 9887 if (DT.properlyDominates(I->getParent(), BB)) 9888 return ProperlyDominatesBlock; 9889 return DoesNotDominateBlock; 9890 } 9891 return ProperlyDominatesBlock; 9892 case scCouldNotCompute: 9893 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9894 } 9895 llvm_unreachable("Unknown SCEV kind!"); 9896 } 9897 9898 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 9899 return getBlockDisposition(S, BB) >= DominatesBlock; 9900 } 9901 9902 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 9903 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 9904 } 9905 9906 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 9907 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 9908 } 9909 9910 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 9911 ValuesAtScopes.erase(S); 9912 LoopDispositions.erase(S); 9913 BlockDispositions.erase(S); 9914 UnsignedRanges.erase(S); 9915 SignedRanges.erase(S); 9916 ExprValueMap.erase(S); 9917 HasRecMap.erase(S); 9918 9919 auto RemoveSCEVFromBackedgeMap = 9920 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 9921 for (auto I = Map.begin(), E = Map.end(); I != E;) { 9922 BackedgeTakenInfo &BEInfo = I->second; 9923 if (BEInfo.hasOperand(S, this)) { 9924 BEInfo.clear(); 9925 Map.erase(I++); 9926 } else 9927 ++I; 9928 } 9929 }; 9930 9931 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 9932 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 9933 } 9934 9935 typedef DenseMap<const Loop *, std::string> VerifyMap; 9936 9937 /// replaceSubString - Replaces all occurrences of From in Str with To. 9938 static void replaceSubString(std::string &Str, StringRef From, StringRef To) { 9939 size_t Pos = 0; 9940 while ((Pos = Str.find(From, Pos)) != std::string::npos) { 9941 Str.replace(Pos, From.size(), To.data(), To.size()); 9942 Pos += To.size(); 9943 } 9944 } 9945 9946 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis. 9947 static void 9948 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) { 9949 std::string &S = Map[L]; 9950 if (S.empty()) { 9951 raw_string_ostream OS(S); 9952 SE.getBackedgeTakenCount(L)->print(OS); 9953 9954 // false and 0 are semantically equivalent. This can happen in dead loops. 9955 replaceSubString(OS.str(), "false", "0"); 9956 // Remove wrap flags, their use in SCEV is highly fragile. 9957 // FIXME: Remove this when SCEV gets smarter about them. 9958 replaceSubString(OS.str(), "<nw>", ""); 9959 replaceSubString(OS.str(), "<nsw>", ""); 9960 replaceSubString(OS.str(), "<nuw>", ""); 9961 } 9962 9963 for (auto *R : reverse(*L)) 9964 getLoopBackedgeTakenCounts(R, Map, SE); // recurse. 9965 } 9966 9967 void ScalarEvolution::verify() const { 9968 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9969 9970 // Gather stringified backedge taken counts for all loops using SCEV's caches. 9971 // FIXME: It would be much better to store actual values instead of strings, 9972 // but SCEV pointers will change if we drop the caches. 
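  // (This check runs from ScalarEvolutionWrapperPass::verifyAnalysis below,
  // and is gated on the VerifySCEV command-line option.)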
9973   VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
9974   for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
9975     getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
9976
9977   // Gather stringified backedge taken counts for all loops using a fresh
9978   // ScalarEvolution object.
9979   ScalarEvolution SE2(F, TLI, AC, DT, LI);
9980   for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
9981     getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);
9982
9983   // Now compare whether they're the same with and without caches. This allows
9984   // verifying that no pass changed the cache.
9985   assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
9986          "New loops suddenly appeared!");
9987
9988   for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
9989                            OldE = BackedgeDumpsOld.end(),
9990                            NewI = BackedgeDumpsNew.begin();
9991        OldI != OldE; ++OldI, ++NewI) {
9992     assert(OldI->first == NewI->first && "Loop order changed!");
9993
9994     // Compare the stringified SCEVs. We don't care if a backedge-taken count
9995     // involving undef changes.
9996     // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. Such a
9997     // change can mean that a pass is buggy or that SCEV has to learn a new
9998     // pattern, but it is usually not harmful.
9999     if (OldI->second != NewI->second &&
10000         OldI->second.find("undef") == std::string::npos &&
10001         NewI->second.find("undef") == std::string::npos &&
10002         OldI->second != "***COULDNOTCOMPUTE***" &&
10003         NewI->second != "***COULDNOTCOMPUTE***") {
10004       dbgs() << "SCEVValidator: SCEV for loop '"
10005              << OldI->first->getHeader()->getName()
10006              << "' changed from '" << OldI->second
10007              << "' to '" << NewI->second << "'!\n";
10008       std::abort();
10009     }
10010   }
10011
10012   // TODO: Verify more things.
10013 }
10014
10015 bool ScalarEvolution::invalidate(
10016     Function &F, const PreservedAnalyses &PA,
10017     FunctionAnalysisManager::Invalidator &Inv) {
10018   // Invalidate the ScalarEvolution object whenever it isn't preserved or one
10019   // of its dependencies is invalidated.
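  // ScalarEvolution keeps references to the assumption cache, dominator tree
  // and loop info, so losing any of those analyses must also drop the cached
  // SCEV state.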
10020 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 10021 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 10022 Inv.invalidate<AssumptionAnalysis>(F, PA) || 10023 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 10024 Inv.invalidate<LoopAnalysis>(F, PA); 10025 } 10026 10027 AnalysisKey ScalarEvolutionAnalysis::Key; 10028 10029 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 10030 FunctionAnalysisManager &AM) { 10031 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 10032 AM.getResult<AssumptionAnalysis>(F), 10033 AM.getResult<DominatorTreeAnalysis>(F), 10034 AM.getResult<LoopAnalysis>(F)); 10035 } 10036 10037 PreservedAnalyses 10038 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 10039 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 10040 return PreservedAnalyses::all(); 10041 } 10042 10043 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 10044 "Scalar Evolution Analysis", false, true) 10045 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10046 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 10047 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 10048 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 10049 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 10050 "Scalar Evolution Analysis", false, true) 10051 char ScalarEvolutionWrapperPass::ID = 0; 10052 10053 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 10054 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 10055 } 10056 10057 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 10058 SE.reset(new ScalarEvolution( 10059 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 10060 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 10061 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 10062 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 10063 return false; 10064 } 10065 10066 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 10067 10068 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 10069 SE->print(OS); 10070 } 10071 10072 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 10073 if (!VerifySCEV) 10074 return; 10075 10076 SE->verify(); 10077 } 10078 10079 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 10080 AU.setPreservesAll(); 10081 AU.addRequiredTransitive<AssumptionCacheTracker>(); 10082 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 10083 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 10084 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 10085 } 10086 10087 const SCEVPredicate * 10088 ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS, 10089 const SCEVConstant *RHS) { 10090 FoldingSetNodeID ID; 10091 // Unique this node based on the arguments 10092 ID.AddInteger(SCEVPredicate::P_Equal); 10093 ID.AddPointer(LHS); 10094 ID.AddPointer(RHS); 10095 void *IP = nullptr; 10096 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10097 return S; 10098 SCEVEqualPredicate *Eq = new (SCEVAllocator) 10099 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 10100 UniquePreds.InsertNode(Eq, IP); 10101 return Eq; 10102 } 10103 10104 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 10105 const SCEVAddRecExpr *AR, 10106 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10107 FoldingSetNodeID ID; 10108 // Unique this node based on the arguments 10109 ID.AddInteger(SCEVPredicate::P_Wrap); 10110 
ID.AddPointer(AR); 10111 ID.AddInteger(AddedFlags); 10112 void *IP = nullptr; 10113 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10114 return S; 10115 auto *OF = new (SCEVAllocator) 10116 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 10117 UniquePreds.InsertNode(OF, IP); 10118 return OF; 10119 } 10120 10121 namespace { 10122 10123 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 10124 public: 10125 /// Rewrites \p S in the context of a loop L and the SCEV predication 10126 /// infrastructure. 10127 /// 10128 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 10129 /// equivalences present in \p Pred. 10130 /// 10131 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 10132 /// \p NewPreds such that the result will be an AddRecExpr. 10133 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 10134 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 10135 SCEVUnionPredicate *Pred) { 10136 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 10137 return Rewriter.visit(S); 10138 } 10139 10140 SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 10141 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 10142 SCEVUnionPredicate *Pred) 10143 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 10144 10145 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 10146 if (Pred) { 10147 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 10148 for (auto *Pred : ExprPreds) 10149 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 10150 if (IPred->getLHS() == Expr) 10151 return IPred->getRHS(); 10152 } 10153 10154 return Expr; 10155 } 10156 10157 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 10158 const SCEV *Operand = visit(Expr->getOperand()); 10159 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 10160 if (AR && AR->getLoop() == L && AR->isAffine()) { 10161 // This couldn't be folded because the operand didn't have the nuw 10162 // flag. Add the nusw flag as an assumption that we could make. 10163 const SCEV *Step = AR->getStepRecurrence(SE); 10164 Type *Ty = Expr->getType(); 10165 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 10166 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 10167 SE.getSignExtendExpr(Step, Ty), L, 10168 AR->getNoWrapFlags()); 10169 } 10170 return SE.getZeroExtendExpr(Operand, Expr->getType()); 10171 } 10172 10173 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 10174 const SCEV *Operand = visit(Expr->getOperand()); 10175 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 10176 if (AR && AR->getLoop() == L && AR->isAffine()) { 10177 // This couldn't be folded because the operand didn't have the nsw 10178 // flag. Add the nssw flag as an assumption that we could make. 10179 const SCEV *Step = AR->getStepRecurrence(SE); 10180 Type *Ty = Expr->getType(); 10181 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 10182 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 10183 SE.getSignExtendExpr(Step, Ty), L, 10184 AR->getNoWrapFlags()); 10185 } 10186 return SE.getSignExtendExpr(Operand, Expr->getType()); 10187 } 10188 10189 private: 10190 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 10191 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10192 auto *A = SE.getWrapPredicate(AR, AddedFlags); 10193 if (!NewPreds) { 10194 // Check if we've already made this assumption. 
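      // A null NewPreds means we are in rewrite-only mode (see
      // rewriteUsingPredicate): we may rely on a wrap assumption only if the
      // given predicate already implies it, and may not add new ones.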
10195 return Pred && Pred->implies(A); 10196 } 10197 NewPreds->insert(A); 10198 return true; 10199 } 10200 10201 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 10202 SCEVUnionPredicate *Pred; 10203 const Loop *L; 10204 }; 10205 } // end anonymous namespace 10206 10207 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 10208 SCEVUnionPredicate &Preds) { 10209 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 10210 } 10211 10212 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 10213 const SCEV *S, const Loop *L, 10214 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 10215 10216 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 10217 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 10218 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 10219 10220 if (!AddRec) 10221 return nullptr; 10222 10223 // Since the transformation was successful, we can now transfer the SCEV 10224 // predicates. 10225 for (auto *P : TransformPreds) 10226 Preds.insert(P); 10227 10228 return AddRec; 10229 } 10230 10231 /// SCEV predicates 10232 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 10233 SCEVPredicateKind Kind) 10234 : FastID(ID), Kind(Kind) {} 10235 10236 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 10237 const SCEVUnknown *LHS, 10238 const SCEVConstant *RHS) 10239 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {} 10240 10241 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 10242 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 10243 10244 if (!Op) 10245 return false; 10246 10247 return Op->LHS == LHS && Op->RHS == RHS; 10248 } 10249 10250 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 10251 10252 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 10253 10254 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 10255 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 10256 } 10257 10258 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 10259 const SCEVAddRecExpr *AR, 10260 IncrementWrapFlags Flags) 10261 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 10262 10263 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 10264 10265 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 10266 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 10267 10268 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 10269 } 10270 10271 bool SCEVWrapPredicate::isAlwaysTrue() const { 10272 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 10273 IncrementWrapFlags IFlags = Flags; 10274 10275 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 10276 IFlags = clearFlags(IFlags, IncrementNSSW); 10277 10278 return IFlags == IncrementAnyWrap; 10279 } 10280 10281 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 10282 OS.indent(Depth) << *getExpr() << " Added Flags: "; 10283 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 10284 OS << "<nusw>"; 10285 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 10286 OS << "<nssw>"; 10287 OS << "\n"; 10288 } 10289 10290 SCEVWrapPredicate::IncrementWrapFlags 10291 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 10292 ScalarEvolution &SE) { 10293 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 10294 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 10295 10296 // We can safely transfer the NSW flag as NSSW. 
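  // (If the whole recurrence is known not to signed-wrap, then in particular
  // no single increment of it signed-wraps, which is what IncrementNSSW
  // asserts.)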
10297 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 10298 ImpliedFlags = IncrementNSSW; 10299 10300 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 10301 // If the increment is positive, the SCEV NUW flag will also imply the 10302 // WrapPredicate NUSW flag. 10303 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 10304 if (Step->getValue()->getValue().isNonNegative()) 10305 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 10306 } 10307 10308 return ImpliedFlags; 10309 } 10310 10311 /// Union predicates don't get cached so create a dummy set ID for it. 10312 SCEVUnionPredicate::SCEVUnionPredicate() 10313 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 10314 10315 bool SCEVUnionPredicate::isAlwaysTrue() const { 10316 return all_of(Preds, 10317 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 10318 } 10319 10320 ArrayRef<const SCEVPredicate *> 10321 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 10322 auto I = SCEVToPreds.find(Expr); 10323 if (I == SCEVToPreds.end()) 10324 return ArrayRef<const SCEVPredicate *>(); 10325 return I->second; 10326 } 10327 10328 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 10329 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 10330 return all_of(Set->Preds, 10331 [this](const SCEVPredicate *I) { return this->implies(I); }); 10332 10333 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 10334 if (ScevPredsIt == SCEVToPreds.end()) 10335 return false; 10336 auto &SCEVPreds = ScevPredsIt->second; 10337 10338 return any_of(SCEVPreds, 10339 [N](const SCEVPredicate *I) { return I->implies(N); }); 10340 } 10341 10342 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 10343 10344 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 10345 for (auto Pred : Preds) 10346 Pred->print(OS, Depth); 10347 } 10348 10349 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 10350 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 10351 for (auto Pred : Set->Preds) 10352 add(Pred); 10353 return; 10354 } 10355 10356 if (implies(N)) 10357 return; 10358 10359 const SCEV *Key = N->getExpr(); 10360 assert(Key && "Only SCEVUnionPredicate doesn't have an " 10361 " associated expression!"); 10362 10363 SCEVToPreds[Key].push_back(N); 10364 Preds.push_back(N); 10365 } 10366 10367 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 10368 Loop &L) 10369 : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {} 10370 10371 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 10372 const SCEV *Expr = SE.getSCEV(V); 10373 RewriteEntry &Entry = RewriteMap[Expr]; 10374 10375 // If we already have an entry and the version matches, return it. 10376 if (Entry.second && Generation == Entry.first) 10377 return Entry.second; 10378 10379 // We found an entry but it's stale. Rewrite the stale entry 10380 // according to the current predicate. 
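  // Predicates are never removed, only added (each addition bumps the
  // generation), so rewriting the previous result under the current, larger
  // predicate set brings it up to date without redoing earlier rewrites.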
10381 if (Entry.second) 10382 Expr = Entry.second; 10383 10384 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 10385 Entry = {Generation, NewSCEV}; 10386 10387 return NewSCEV; 10388 } 10389 10390 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 10391 if (!BackedgeCount) { 10392 SCEVUnionPredicate BackedgePred; 10393 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 10394 addPredicate(BackedgePred); 10395 } 10396 return BackedgeCount; 10397 } 10398 10399 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 10400 if (Preds.implies(&Pred)) 10401 return; 10402 Preds.add(&Pred); 10403 updateGeneration(); 10404 } 10405 10406 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 10407 return Preds; 10408 } 10409 10410 void PredicatedScalarEvolution::updateGeneration() { 10411 // If the generation number wrapped recompute everything. 10412 if (++Generation == 0) { 10413 for (auto &II : RewriteMap) { 10414 const SCEV *Rewritten = II.second.second; 10415 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 10416 } 10417 } 10418 } 10419 10420 void PredicatedScalarEvolution::setNoOverflow( 10421 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10422 const SCEV *Expr = getSCEV(V); 10423 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10424 10425 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 10426 10427 // Clear the statically implied flags. 10428 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 10429 addPredicate(*SE.getWrapPredicate(AR, Flags)); 10430 10431 auto II = FlagsMap.insert({V, Flags}); 10432 if (!II.second) 10433 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 10434 } 10435 10436 bool PredicatedScalarEvolution::hasNoOverflow( 10437 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10438 const SCEV *Expr = getSCEV(V); 10439 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10440 10441 Flags = SCEVWrapPredicate::clearFlags( 10442 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 10443 10444 auto II = FlagsMap.find(V); 10445 10446 if (II != FlagsMap.end()) 10447 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 10448 10449 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 10450 } 10451 10452 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 10453 const SCEV *Expr = this->getSCEV(V); 10454 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 10455 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 10456 10457 if (!New) 10458 return nullptr; 10459 10460 for (auto *P : NewPreds) 10461 Preds.add(P); 10462 10463 updateGeneration(); 10464 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 10465 return New; 10466 } 10467 10468 PredicatedScalarEvolution::PredicatedScalarEvolution( 10469 const PredicatedScalarEvolution &Init) 10470 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 10471 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 10472 for (const auto &I : Init.FlagsMap) 10473 FlagsMap.insert(I); 10474 } 10475 10476 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 10477 // For each block. 10478 for (auto *BB : L.getBlocks()) 10479 for (auto &I : *BB) { 10480 if (!SE.isSCEVable(I.getType())) 10481 continue; 10482 10483 auto *Expr = SE.getSCEV(&I); 10484 auto II = RewriteMap.find(Expr); 10485 10486 if (II == RewriteMap.end()) 10487 continue; 10488 10489 // Don't print things that are not interesting. 
10490 if (II->second.second == Expr) 10491 continue; 10492 10493 OS.indent(Depth) << "[PSE]" << I << ":\n"; 10494 OS.indent(Depth + 2) << *Expr << "\n"; 10495 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 10496 } 10497 } 10498
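//===----------------------------------------------------------------------===//
// Example usage of PredicatedScalarEvolution (an illustrative sketch only;
// `SE', `L' and the pointer-typed value `Ptr' are assumed to be provided by
// the client, e.g. a loop transformation pass):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is an add recurrence that is only valid under the predicates in
//     // PSE.getUnionPredicate(); emit runtime checks for them before
//     // relying on AR.
//   }
//===----------------------------------------------------------------------===//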