//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
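// As a quick illustration of what the analysis produces: for a loop like
//
//   for (i = 0; i != n; ++i)
//     ... A[i] ...
//
// the PHI node for i is recognized as the polynomial recurrence {0,+,1} on
// the loop, and because SCEVs are uniqued, any other expression of the same
// shape is the very same SCEV object, which is what makes the pointer
// comparisons mentioned above legal.  (An illustrative sketch; the printed
// form of expressions is defined by SCEV::print in this file.)
//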
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
VerifySCEVMap("verify-scev-maps",
              cl::desc("Verify no dangling value in ScalarEvolution's "
                       "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
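// For reference, a few textual forms the printer above produces (sketched
// from the cases; the loop and value names are hypothetical):
//
//   {0,+,4}<nuw><%loop>     -- an add recurrence with a no-unsigned-wrap flag
//   (zext i8 %x to i32)     -- a zero-extend cast
//   (%a + %b)<nsw>          -- an n-ary add with a no-signed-wrap flag
//   (%a /u 2)               -- an unsigned division
//   sizeof(double)          -- a SCEVUnknown matching the sizeof idiom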

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
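// The following three predicates recognize the constant-expression idioms
// used to encode sizeof, alignof, and offsetof.  As a rough sketch,
// sizeof(double) appears as a ptrtoint of a GEP one element past a null
// double pointer, along the lines of:
//
//   ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
//
// (Illustrative; the exact constant-expression shape accepted is whatever
// the checks below match.)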

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values.  This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively.  A three-way result allows recursive comparisons to
// be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance.  We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
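/// For example (a sketch with hypothetical values): if an add's operand list
/// arrives as (%b + %a + %b), grouping yields (%a + %b + %b), so a caller
/// such as getAddExpr can spot the adjacent duplicates and fold them into
/// (%a + 2 * %b) with a single linear scan.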
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
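  //
  // For example (a sketch with hypothetical values): dividing (9 + 4 * %a)
  // by 4 yields Quotient = (2 + %a) and Remainder = 1, since the constant
  // part divides via signed divrem and the product part contributes %a
  // exactly.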
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case when N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

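  // A sketch with hypothetical values: dividing (6 * %a * %b) by (2 * %b),
  // divide() above first splits the product denominator, dividing by 2 and
  // then by %b; each step finds one product operand that the denominator
  // term divides exactly, leaving Quotient = (3 * %a) and Remainder = 0.
  // When no such operand exists and the denominator is a SCEVUnknown, the
  // remainder is instead recovered by substituting Denominator := 0 into
  // the Numerator, as done below.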
  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator.  We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division.  We set the quotient
  // to be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
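  //
  // A worked instance of the scheme (a sketch with small numbers): take
  // K = 3 and W = 8.  Then K! = 6 = 2^1 * 3, so T = 1 and K!/2^T = 3.  The
  // multiplicative inverse of 3 mod 2^8 is 171 (3 * 171 = 513 = 2 * 256 + 1),
  // and it survives the truncation to width 8.  For It = 5, the product
  // 5 * 4 * 3 = 60 is computed at W + T = 9 bits, the division by 2^T gives
  // 30, and 30 * 171 = 5130 = 10 (mod 2^8), which is indeed BC(5, 3) = 10.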

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
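///
/// For instance, {0,+,1,+,1} evaluates at iteration n to
/// 0 + 1*BC(n, 1) + 1*BC(n, 2) = n + n*(n-1)/2 = n*(n+1)/2, the n-th
/// triangular number (at n = 4: 4 + 6 = 10).
///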
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node.  We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
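//
// As a sketch with small numbers: at bit width 8 with a step known to lie in
// [1, 10], the limit is SINT_MIN - 10 = -128 - 10 = 118 (mod 2^8) with
// predicate SLT; any recurrence value v with v <s 118 satisfies
// v + step <= 117 + 10 = 127, so the increment cannot overflow.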
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(
      const SCEV *, Type *, ScalarEvolution::ExtendCacheTy &Cache);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy
    ExtendOpTraits<SCEVSignExtendExpr>::GetExtendExpr =
        &ScalarEvolution::getSignExtendExprCached;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy
    ExtendOpTraits<SCEVZeroExtendExpr>::GetExtendExpr =
        &ScalarEvolution::getZeroExtendExprCached;
}

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
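//
// For instance (a sketch with hypothetical values): for AR = {%x + 4,+,4}
// being zero extended, the pre-increment sibling is {%x,+,4}; if that
// sibling is known <nuw> (and the backedge is taken at least once), then
// zext(%x + 4) can be rewritten as zext(%x) + 4, keeping the extended
// expression in normalized AddRec form.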
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        ScalarEvolution::ExtendCacheTy &Cache) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step.  Full SCEV
  // subtraction is expensive.  For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Cache),
                     (SE->*GetExtendExpr)(Step, WideTy, Cache));
  if ((SE->*GetExtendExpr)(Start, WideTy, Cache) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        ScalarEvolution::ExtendCacheTy &Cache) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Cache);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Cache);

  return SE->getAddExpr(
      (SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, Cache),
      (SE->*GetExtendExpr)(PreStart, Ty, Cache));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty) {
  // Use the local cache to prevent exponential behavior of
  // getZeroExtendExprImpl.
  ExtendCacheTy Cache;
  return getZeroExtendExprCached(Op, Ty, Cache);
}

/// Query \p Cache before calling getZeroExtendExprImpl.  If there is no
/// related entry in the \p Cache, call getZeroExtendExprImpl and save
/// the result in the \p Cache.
const SCEV *ScalarEvolution::getZeroExtendExprCached(const SCEV *Op, Type *Ty,
                                                     ExtendCacheTy &Cache) {
  auto It = Cache.find({Op, Ty});
  if (It != Cache.end())
    return It->second;
  const SCEV *ZExt = getZeroExtendExprImpl(Op, Ty, Cache);
  auto InsertResult = Cache.insert({{Op, Ty}, ZExt});
  assert(InsertResult.second && "Expect the key was not in the cache");
  (void)InsertResult;
  return ZExt;
}

/// The real implementation of getZeroExtendExpr.
const SCEV *ScalarEvolution::getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
                                                   ExtendCacheTy &Cache) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExprCached(SZ->getOperand(), Ty, Cache);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
This allows analysis of something like
1579 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1580 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1581 if (AR->isAffine()) {
1582 const SCEV *Start = AR->getStart();
1583 const SCEV *Step = AR->getStepRecurrence(*this);
1584 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1585 const Loop *L = AR->getLoop();
1586 
1587 if (!AR->hasNoUnsignedWrap()) {
1588 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1589 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1590 }
1591 
1592 // If we have special knowledge that this addrec won't overflow,
1593 // we don't need to do any further analysis.
1594 if (AR->hasNoUnsignedWrap())
1595 return getAddRecExpr(
1596 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
1597 getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
1598 
1599 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1600 // Note that this serves two purposes: It filters out loops that are
1601 // simply not analyzable, and it covers the case where this code is
1602 // being called from within backedge-taken count analysis, such that
1603 // attempting to ask for the backedge-taken count would likely result
1604 // in infinite recursion. In the latter case, the analysis code will
1605 // cope with a conservative value, and it will take care to purge
1606 // that value once it has finished.
1607 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1608 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1609 // Manually compute the final value for AR, checking for
1610 // overflow.
1611 
1612 // Check whether the backedge-taken count can be losslessly cast to
1613 // the addrec's type. The count is always unsigned.
1614 const SCEV *CastedMaxBECount =
1615 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1616 const SCEV *RecastedMaxBECount =
1617 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1618 if (MaxBECount == RecastedMaxBECount) {
1619 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1620 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1621 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
1622 const SCEV *ZAdd =
1623 getZeroExtendExprCached(getAddExpr(Start, ZMul), WideTy, Cache);
1624 const SCEV *WideStart = getZeroExtendExprCached(Start, WideTy, Cache);
1625 const SCEV *WideMaxBECount =
1626 getZeroExtendExprCached(CastedMaxBECount, WideTy, Cache);
1627 const SCEV *OperandExtendedAdd = getAddExpr(
1628 WideStart, getMulExpr(WideMaxBECount, getZeroExtendExprCached(
1629 Step, WideTy, Cache)));
1630 if (ZAdd == OperandExtendedAdd) {
1631 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1632 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1633 // Return the expression with the addrec on the outside.
1634 return getAddRecExpr(
1635 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache),
1636 getZeroExtendExprCached(Step, Ty, Cache), L,
1637 AR->getNoWrapFlags());
1638 }
1639 // Similar to above, only this time treat the step value as signed.
1640 // This covers loops that count down.
1641 OperandExtendedAdd =
1642 getAddExpr(WideStart,
1643 getMulExpr(WideMaxBECount,
1644 getSignExtendExpr(Step, WideTy)));
1645 if (ZAdd == OperandExtendedAdd) {
1646 // Cache knowledge of AR NW, which is propagated to this AddRec.
1647 // Negative step causes unsigned wrap, but it still can't self-wrap.
1648 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1649 // Return the expression with the addrec on the outside. 1650 return getAddRecExpr( 1651 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), 1652 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1653 } 1654 } 1655 } 1656 1657 // Normally, in the cases we can prove no-overflow via a 1658 // backedge guarding condition, we can also compute a backedge 1659 // taken count for the loop. The exceptions are assumptions and 1660 // guards present in the loop -- SCEV is not great at exploiting 1661 // these to compute max backedge taken counts, but can still use 1662 // these to prove lack of overflow. Use this fact to avoid 1663 // doing extra work that may not pay off. 1664 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1665 !AC.assumptions().empty()) { 1666 // If the backedge is guarded by a comparison with the pre-inc 1667 // value the addrec is safe. Also, if the entry is guarded by 1668 // a comparison with the start value and the backedge is 1669 // guarded by a comparison with the post-inc value, the addrec 1670 // is safe. 1671 if (isKnownPositive(Step)) { 1672 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1673 getUnsignedRangeMax(Step)); 1674 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1675 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1676 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1677 AR->getPostIncExpr(*this), N))) { 1678 // Cache knowledge of AR NUW, which is propagated to this 1679 // AddRec. 1680 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1681 // Return the expression with the addrec on the outside. 1682 return getAddRecExpr( 1683 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), 1684 getZeroExtendExprCached(Step, Ty, Cache), L, 1685 AR->getNoWrapFlags()); 1686 } 1687 } else if (isKnownNegative(Step)) { 1688 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1689 getSignedRangeMin(Step)); 1690 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1691 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1692 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1693 AR->getPostIncExpr(*this), N))) { 1694 // Cache knowledge of AR NW, which is propagated to this 1695 // AddRec. Negative step causes unsigned wrap, but it 1696 // still can't self-wrap. 1697 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1698 // Return the expression with the addrec on the outside. 1699 return getAddRecExpr( 1700 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), 1701 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1702 } 1703 } 1704 } 1705 1706 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1707 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1708 return getAddRecExpr( 1709 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Cache), 1710 getZeroExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags()); 1711 } 1712 } 1713 1714 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1715 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1716 if (SA->hasNoUnsignedWrap()) { 1717 // If the addition does not unsign overflow then we can, by definition, 1718 // commute the zero extension with the addition operation. 
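// For instance, with i8 operands, (250 + x)<nuw> guarantees that
// 250 + x <= 255, so no carry is lost and zext(250 + x) folds to
// 250 + zext(x) in the wider type.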
1719 SmallVector<const SCEV *, 4> Ops; 1720 for (const auto *Op : SA->operands()) 1721 Ops.push_back(getZeroExtendExprCached(Op, Ty, Cache)); 1722 return getAddExpr(Ops, SCEV::FlagNUW); 1723 } 1724 } 1725 1726 // The cast wasn't folded; create an explicit cast node. 1727 // Recompute the insert position, as it may have been invalidated. 1728 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1729 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1730 Op, Ty); 1731 UniqueSCEVs.InsertNode(S, IP); 1732 return S; 1733 } 1734 1735 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty) { 1736 // Use the local cache to prevent exponential behavior of 1737 // getSignExtendExprImpl. 1738 ExtendCacheTy Cache; 1739 return getSignExtendExprCached(Op, Ty, Cache); 1740 } 1741 1742 /// Query \p Cache before calling getSignExtendExprImpl. If there is no 1743 /// related entry in the \p Cache, call getSignExtendExprImpl and save 1744 /// the result in the \p Cache. 1745 const SCEV *ScalarEvolution::getSignExtendExprCached(const SCEV *Op, Type *Ty, 1746 ExtendCacheTy &Cache) { 1747 auto It = Cache.find({Op, Ty}); 1748 if (It != Cache.end()) 1749 return It->second; 1750 const SCEV *SExt = getSignExtendExprImpl(Op, Ty, Cache); 1751 auto InsertResult = Cache.insert({{Op, Ty}, SExt}); 1752 assert(InsertResult.second && "Expect the key was not in the cache"); 1753 (void)InsertResult; 1754 return SExt; 1755 } 1756 1757 /// The real implementation of getSignExtendExpr. 1758 const SCEV *ScalarEvolution::getSignExtendExprImpl(const SCEV *Op, Type *Ty, 1759 ExtendCacheTy &Cache) { 1760 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1761 "This is not an extending conversion!"); 1762 assert(isSCEVable(Ty) && 1763 "This is not a conversion to a SCEVable type!"); 1764 Ty = getEffectiveSCEVType(Ty); 1765 1766 // Fold if the operand is constant. 1767 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1768 return getConstant( 1769 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1770 1771 // sext(sext(x)) --> sext(x) 1772 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1773 return getSignExtendExprCached(SS->getOperand(), Ty, Cache); 1774 1775 // sext(zext(x)) --> zext(x) 1776 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1777 return getZeroExtendExpr(SZ->getOperand(), Ty); 1778 1779 // Before doing any expensive analysis, check to see if we've already 1780 // computed a SCEV for this Op and Ty. 1781 FoldingSetNodeID ID; 1782 ID.AddInteger(scSignExtend); 1783 ID.AddPointer(Op); 1784 ID.AddPointer(Ty); 1785 void *IP = nullptr; 1786 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1787 1788 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1789 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1790 // It's possible the bits taken off by the truncate were all sign bits. If 1791 // so, we should be able to simplify this further. 
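// For example, if an i32 value x has signed range [-100, 100], then
// truncating to i8 discards only redundant copies of the sign bit, so
// sext(trunc(x)) back to i32 is simply x.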
1792 const SCEV *X = ST->getOperand();
1793 ConstantRange CR = getSignedRange(X);
1794 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1795 unsigned NewBits = getTypeSizeInBits(Ty);
1796 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1797 CR.sextOrTrunc(NewBits)))
1798 return getTruncateOrSignExtend(X, Ty);
1799 }
1800 
1801 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if 0 < C1 < C2 and C2 is a
1802 // power of 2
1803 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1804 if (SA->getNumOperands() == 2) {
1805 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
1806 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
1807 if (SMul && SC1) {
1808 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
1809 const APInt &C1 = SC1->getAPInt();
1810 const APInt &C2 = SC2->getAPInt();
1811 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
1812 C2.ugt(C1) && C2.isPowerOf2())
1813 return getAddExpr(getSignExtendExprCached(SC1, Ty, Cache),
1814 getSignExtendExprCached(SMul, Ty, Cache));
1815 }
1816 }
1817 }
1818 
1819 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1820 if (SA->hasNoSignedWrap()) {
1821 // If the addition does not sign overflow then we can, by definition,
1822 // commute the sign extension with the addition operation.
1823 SmallVector<const SCEV *, 4> Ops;
1824 for (const auto *Op : SA->operands())
1825 Ops.push_back(getSignExtendExprCached(Op, Ty, Cache));
1826 return getAddExpr(Ops, SCEV::FlagNSW);
1827 }
1828 }
1829 // If the input value is a chrec scev, and we can prove that the value
1830 // did not overflow the old, smaller, value, we can sign extend all of the
1831 // operands (often constants). This allows analysis of something like
1832 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1833 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1834 if (AR->isAffine()) {
1835 const SCEV *Start = AR->getStart();
1836 const SCEV *Step = AR->getStepRecurrence(*this);
1837 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1838 const Loop *L = AR->getLoop();
1839 
1840 if (!AR->hasNoSignedWrap()) {
1841 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1842 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1843 }
1844 
1845 // If we have special knowledge that this addrec won't overflow,
1846 // we don't need to do any further analysis.
1847 if (AR->hasNoSignedWrap())
1848 return getAddRecExpr(
1849 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
1850 getSignExtendExprCached(Step, Ty, Cache), L, SCEV::FlagNSW);
1851 
1852 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1853 // Note that this serves two purposes: It filters out loops that are
1854 // simply not analyzable, and it covers the case where this code is
1855 // being called from within backedge-taken count analysis, such that
1856 // attempting to ask for the backedge-taken count would likely result
1857 // in infinite recursion. In the latter case, the analysis code will
1858 // cope with a conservative value, and it will take care to purge
1859 // that value once it has finished.
1860 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1861 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1862 // Manually compute the final value for AR, checking for
1863 // overflow.
1864 
1865 // Check whether the backedge-taken count can be losslessly cast to
1866 // the addrec's type. The count is always unsigned.
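// That is, truncating MaxBECount to the addrec's type and extending it
// back must round-trip to the same SCEV; if any bits were dropped, the
// comparison below fails and this route is abandoned.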
1866 const SCEV *CastedMaxBECount = 1867 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1868 const SCEV *RecastedMaxBECount = 1869 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1870 if (MaxBECount == RecastedMaxBECount) { 1871 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1872 // Check whether Start+Step*MaxBECount has no signed overflow. 1873 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); 1874 const SCEV *SAdd = 1875 getSignExtendExprCached(getAddExpr(Start, SMul), WideTy, Cache); 1876 const SCEV *WideStart = getSignExtendExprCached(Start, WideTy, Cache); 1877 const SCEV *WideMaxBECount = 1878 getZeroExtendExpr(CastedMaxBECount, WideTy); 1879 const SCEV *OperandExtendedAdd = getAddExpr( 1880 WideStart, getMulExpr(WideMaxBECount, getSignExtendExprCached( 1881 Step, WideTy, Cache))); 1882 if (SAdd == OperandExtendedAdd) { 1883 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1884 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1885 // Return the expression with the addrec on the outside. 1886 return getAddRecExpr( 1887 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), 1888 getSignExtendExprCached(Step, Ty, Cache), L, 1889 AR->getNoWrapFlags()); 1890 } 1891 // Similar to above, only this time treat the step value as unsigned. 1892 // This covers loops that count up with an unsigned step. 1893 OperandExtendedAdd = 1894 getAddExpr(WideStart, 1895 getMulExpr(WideMaxBECount, 1896 getZeroExtendExpr(Step, WideTy))); 1897 if (SAdd == OperandExtendedAdd) { 1898 // If AR wraps around then 1899 // 1900 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1901 // => SAdd != OperandExtendedAdd 1902 // 1903 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1904 // (SAdd == OperandExtendedAdd => AR is NW) 1905 1906 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1907 1908 // Return the expression with the addrec on the outside. 1909 return getAddRecExpr( 1910 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache), 1911 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1912 } 1913 } 1914 } 1915 1916 // Normally, in the cases we can prove no-overflow via a 1917 // backedge guarding condition, we can also compute a backedge 1918 // taken count for the loop. The exceptions are assumptions and 1919 // guards present in the loop -- SCEV is not great at exploiting 1920 // these to compute max backedge taken counts, but can still use 1921 // these to prove lack of overflow. Use this fact to avoid 1922 // doing extra work that may not pay off. 1923 1924 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1925 !AC.assumptions().empty()) { 1926 // If the backedge is guarded by a comparison with the pre-inc 1927 // value the addrec is safe. Also, if the entry is guarded by 1928 // a comparison with the start value and the backedge is 1929 // guarded by a comparison with the post-inc value, the addrec 1930 // is safe. 1931 ICmpInst::Predicate Pred; 1932 const SCEV *OverflowLimit = 1933 getSignedOverflowLimitForStep(Step, &Pred, this); 1934 if (OverflowLimit && 1935 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1936 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1937 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1938 OverflowLimit)))) { 1939 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
1940 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1941 return getAddRecExpr(
1942 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
1943 getSignExtendExprCached(Step, Ty, Cache), L,
1944 AR->getNoWrapFlags());
1945 }
1946 }
1947 
1948 // If Start and Step are constants, check if we can apply this
1949 // transformation:
1950 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if 0 < C1 < C2 and C2 is a power of 2
1951 auto *SC1 = dyn_cast<SCEVConstant>(Start);
1952 auto *SC2 = dyn_cast<SCEVConstant>(Step);
1953 if (SC1 && SC2) {
1954 const APInt &C1 = SC1->getAPInt();
1955 const APInt &C2 = SC2->getAPInt();
1956 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1957 C2.isPowerOf2()) {
1958 Start = getSignExtendExprCached(Start, Ty, Cache);
1959 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
1960 AR->getNoWrapFlags());
1961 return getAddExpr(Start, getSignExtendExprCached(NewAR, Ty, Cache));
1962 }
1963 }
1964 
1965 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1966 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1967 return getAddRecExpr(
1968 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Cache),
1969 getSignExtendExprCached(Step, Ty, Cache), L, AR->getNoWrapFlags());
1970 }
1971 }
1972 
1973 // If the input value is provably non-negative and we could not simplify
1974 // away the sext, build a zext instead.
1975 if (isKnownNonNegative(Op))
1976 return getZeroExtendExpr(Op, Ty);
1977 
1978 // The cast wasn't folded; create an explicit cast node.
1979 // Recompute the insert position, as it may have been invalidated.
1980 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1981 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1982 Op, Ty);
1983 UniqueSCEVs.InsertNode(S, IP);
1984 return S;
1985 }
1986 
1987 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1988 /// unspecified bits out to the given type.
1989 ///
1990 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1991 Type *Ty) {
1992 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1993 "This is not an extending conversion!");
1994 assert(isSCEVable(Ty) &&
1995 "This is not a conversion to a SCEVable type!");
1996 Ty = getEffectiveSCEVType(Ty);
1997 
1998 // Sign-extend negative constants.
1999 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2000 if (SC->getAPInt().isNegative())
2001 return getSignExtendExpr(Op, Ty);
2002 
2003 // Peel off a truncate cast.
2004 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2005 const SCEV *NewOp = T->getOperand();
2006 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2007 return getAnyExtendExpr(NewOp, Ty);
2008 return getTruncateOrNoop(NewOp, Ty);
2009 }
2010 
2011 // Next try a zext cast. If the cast is folded, use it.
2012 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2013 if (!isa<SCEVZeroExtendExpr>(ZExt))
2014 return ZExt;
2015 
2016 // Next try a sext cast. If the cast is folded, use it.
2017 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2018 if (!isa<SCEVSignExtendExpr>(SExt))
2019 return SExt;
2020 
2021 // Force the cast to be folded into the operands of an addrec.
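// Note that only no-self-wrap is claimed below: an any-extend leaves the
// new high bits unspecified, so the result can never be marked <nuw> or
// <nsw>; e.g. anyext({0,+,1}<L>) becomes {anyext(0),+,anyext(1)}<nw><L>.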
2022 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2023 SmallVector<const SCEV *, 4> Ops;
2024 for (const SCEV *Op : AR->operands())
2025 Ops.push_back(getAnyExtendExpr(Op, Ty));
2026 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2027 }
2028 
2029 // If the expression is obviously signed, use the sext cast value.
2030 if (isa<SCEVSMaxExpr>(Op))
2031 return SExt;
2032 
2033 // Absent any other information, use the zext cast value.
2034 return ZExt;
2035 }
2036 
2037 /// Process the given Ops list, which is a list of operands to be added under
2038 /// the given scale, and update the given map. This is a helper function for
2039 /// getAddExpr. As an example of what it does, given a sequence of operands
2040 /// that would form an add expression like this:
2041 ///
2042 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2043 ///
2044 /// where A and B are constants, update the map with these values:
2045 ///
2046 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2047 ///
2048 /// and add 13 + A*B*29 to AccumulatedConstant.
2049 /// This will allow getAddExpr to produce this:
2050 ///
2051 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2052 ///
2053 /// This form often exposes folding opportunities that are hidden in
2054 /// the original operand list.
2055 ///
2056 /// Return true iff it appears that any interesting folding opportunities
2057 /// may be exposed. This helps getAddExpr short-circuit extra work in
2058 /// the common case where no interesting opportunities are present, and
2059 /// is also used as a check to avoid infinite recursion.
2060 ///
2061 static bool
2062 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2063 SmallVectorImpl<const SCEV *> &NewOps,
2064 APInt &AccumulatedConstant,
2065 const SCEV *const *Ops, size_t NumOperands,
2066 const APInt &Scale,
2067 ScalarEvolution &SE) {
2068 bool Interesting = false;
2069 
2070 // Iterate over the add operands. They are sorted, with constants first.
2071 unsigned i = 0;
2072 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2073 ++i;
2074 // Pull a buried constant out to the outside.
2075 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2076 Interesting = true;
2077 AccumulatedConstant += Scale * C->getAPInt();
2078 }
2079 
2080 // Next comes everything else. We're especially interested in multiplies
2081 // here, but they're in the middle, so just visit the rest with one loop.
2082 for (; i != NumOperands; ++i) {
2083 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2084 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2085 APInt NewScale =
2086 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2087 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2088 // A multiplication of a constant with another add; recurse.
2089 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2090 Interesting |=
2091 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2092 Add->op_begin(), Add->getNumOperands(),
2093 NewScale, SE);
2094 } else {
2095 // A multiplication of a constant with some other value. Update
2096 // the map.
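// For example, encountering 5*(a*b) while scanning under Scale == 2
// records the key a*b with scale 10, merging with any entry the map
// already holds for a*b.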
2097 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2098 const SCEV *Key = SE.getMulExpr(MulOps); 2099 auto Pair = M.insert({Key, NewScale}); 2100 if (Pair.second) { 2101 NewOps.push_back(Pair.first->first); 2102 } else { 2103 Pair.first->second += NewScale; 2104 // The map already had an entry for this value, which may indicate 2105 // a folding opportunity. 2106 Interesting = true; 2107 } 2108 } 2109 } else { 2110 // An ordinary operand. Update the map. 2111 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2112 M.insert({Ops[i], Scale}); 2113 if (Pair.second) { 2114 NewOps.push_back(Pair.first->first); 2115 } else { 2116 Pair.first->second += Scale; 2117 // The map already had an entry for this value, which may indicate 2118 // a folding opportunity. 2119 Interesting = true; 2120 } 2121 } 2122 } 2123 2124 return Interesting; 2125 } 2126 2127 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2128 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2129 // can't-overflow flags for the operation if possible. 2130 static SCEV::NoWrapFlags 2131 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2132 const SmallVectorImpl<const SCEV *> &Ops, 2133 SCEV::NoWrapFlags Flags) { 2134 using namespace std::placeholders; 2135 typedef OverflowingBinaryOperator OBO; 2136 2137 bool CanAnalyze = 2138 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2139 (void)CanAnalyze; 2140 assert(CanAnalyze && "don't call from other places!"); 2141 2142 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2143 SCEV::NoWrapFlags SignOrUnsignWrap = 2144 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2145 2146 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2147 auto IsKnownNonNegative = [&](const SCEV *S) { 2148 return SE->isKnownNonNegative(S); 2149 }; 2150 2151 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2152 Flags = 2153 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2154 2155 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2156 2157 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2158 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2159 2160 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2161 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2162 2163 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2164 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2165 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2166 Instruction::Add, C, OBO::NoSignedWrap); 2167 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2168 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2169 } 2170 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2171 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2172 Instruction::Add, C, OBO::NoUnsignedWrap); 2173 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2174 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2175 } 2176 } 2177 2178 return Flags; 2179 } 2180 2181 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2182 if (!isLoopInvariant(S, L)) 2183 return false; 2184 // If a value depends on a SCEVUnknown which is defined after the loop, we 2185 // conservatively assume that we cannot calculate it at the loop's entry. 
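// The visitor below is driven by SCEVTraversal: follow() decides which
// nodes to descend into, and isDone() lets the walk stop early once an
// instruction-defined SCEVUnknown dominated by the loop header is found.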
2186 struct FindDominatedSCEVUnknown {
2187 bool Found = false;
2188 const Loop *L;
2189 DominatorTree &DT;
2190 LoopInfo &LI;
2191 
2192 FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
2193 : L(L), DT(DT), LI(LI) {}
2194 
2195 bool checkSCEVUnknown(const SCEVUnknown *SU) {
2196 if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
2197 if (DT.dominates(L->getHeader(), I->getParent()))
2198 Found = true;
2199 else
2200 assert(DT.dominates(I->getParent(), L->getHeader()) &&
2201 "No dominance relationship between SCEV and loop?");
2202 }
2203 return false;
2204 }
2205 
2206 bool follow(const SCEV *S) {
2207 switch (static_cast<SCEVTypes>(S->getSCEVType())) {
2208 case scConstant:
2209 return false;
2210 case scAddRecExpr:
2211 case scTruncate:
2212 case scZeroExtend:
2213 case scSignExtend:
2214 case scAddExpr:
2215 case scMulExpr:
2216 case scUMaxExpr:
2217 case scSMaxExpr:
2218 case scUDivExpr:
2219 return true;
2220 case scUnknown:
2221 return checkSCEVUnknown(cast<SCEVUnknown>(S));
2222 case scCouldNotCompute:
2223 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2224 }
2225 return false;
2226 }
2227 
2228 bool isDone() { return Found; }
2229 };
2230 
2231 FindDominatedSCEVUnknown FSU(L, DT, LI);
2232 SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
2233 ST.visitAll(S);
2234 return !FSU.Found;
2235 }
2236 
2237 /// Get a canonical add expression, or something simpler if possible.
2238 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2239 SCEV::NoWrapFlags Flags,
2240 unsigned Depth) {
2241 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2242 "only nuw or nsw allowed");
2243 assert(!Ops.empty() && "Cannot get empty add!");
2244 if (Ops.size() == 1) return Ops[0];
2245 #ifndef NDEBUG
2246 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2247 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2248 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2249 "SCEVAddExpr operand types don't match!");
2250 #endif
2251 
2252 // Sort by complexity; this groups all similar expression types together.
2253 GroupByComplexity(Ops, &LI, DT);
2254 
2255 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2256 
2257 // If there are any constants, fold them together.
2258 unsigned Idx = 0;
2259 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2260 ++Idx;
2261 assert(Idx < Ops.size());
2262 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2263 // We found two constants, fold them together!
2264 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2265 if (Ops.size() == 2) return Ops[0];
2266 Ops.erase(Ops.begin()+1); // Erase the folded element
2267 LHSC = cast<SCEVConstant>(Ops[0]);
2268 }
2269 
2270 // If we are left with a constant zero being added, strip it off.
2271 if (LHSC->getValue()->isZero()) {
2272 Ops.erase(Ops.begin());
2273 --Idx;
2274 }
2275 
2276 if (Ops.size() == 1) return Ops[0];
2277 }
2278 
2279 // Limit the recursion depth.
2280 if (Depth > MaxArithDepth)
2281 return getOrCreateAddExpr(Ops, Flags);
2282 
2283 // Okay, check to see if the same value occurs in the operand list more than
2284 // once. If so, merge them together into a multiply expression. Since we
2285 // sorted the list, these values are required to be adjacent.
2286 Type *Ty = Ops[0]->getType();
2287 bool FoundMatch = false;
2288 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2289 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2290 // Scan ahead to count how many equal operands there are.
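// For example, in the sorted list (x, y, y, y) this counts three copies
// of y and rewrites the whole sum as x + y*3.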
2291 unsigned Count = 2;
2292 while (i+Count != e && Ops[i+Count] == Ops[i])
2293 ++Count;
2294 // Merge the values into a multiply.
2295 const SCEV *Scale = getConstant(Ty, Count);
2296 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2297 if (Ops.size() == Count)
2298 return Mul;
2299 Ops[i] = Mul;
2300 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2301 --i; e -= Count - 1;
2302 FoundMatch = true;
2303 }
2304 if (FoundMatch)
2305 return getAddExpr(Ops, Flags);
2306 
2307 // Check for truncates. If all the operands are truncated from the same
2308 // type, see if factoring out the truncate would permit the result to be
2309 // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
2310 // if the contents of the resulting outer trunc fold to something simple.
2311 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
2312 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
2313 Type *DstType = Trunc->getType();
2314 Type *SrcType = Trunc->getOperand()->getType();
2315 SmallVector<const SCEV *, 8> LargeOps;
2316 bool Ok = true;
2317 // Check all the operands to see if they can be represented in the
2318 // source type of the truncate.
2319 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2320 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2321 if (T->getOperand()->getType() != SrcType) {
2322 Ok = false;
2323 break;
2324 }
2325 LargeOps.push_back(T->getOperand());
2326 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2327 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2328 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2329 SmallVector<const SCEV *, 8> LargeMulOps;
2330 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2331 if (const SCEVTruncateExpr *T =
2332 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2333 if (T->getOperand()->getType() != SrcType) {
2334 Ok = false;
2335 break;
2336 }
2337 LargeMulOps.push_back(T->getOperand());
2338 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2339 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2340 } else {
2341 Ok = false;
2342 break;
2343 }
2344 }
2345 if (Ok)
2346 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2347 } else {
2348 Ok = false;
2349 break;
2350 }
2351 }
2352 if (Ok) {
2353 // Evaluate the expression in the larger type.
2354 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1);
2355 // If it folds to something simple, use it. Otherwise, don't.
2356 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2357 return getTruncateExpr(Fold, DstType);
2358 }
2359 }
2360 
2361 // Skip past any other cast SCEVs.
2362 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2363 ++Idx;
2364 
2365 // If there are add operands, they would be next.
2366 if (Idx < Ops.size()) {
2367 bool DeletedAdd = false;
2368 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2369 if (Ops.size() > AddOpsInlineThreshold ||
2370 Add->getNumOperands() > AddOpsInlineThreshold)
2371 break;
2372 // If we have an add, expand the add operands onto the end of the operands
2373 // list.
2374 Ops.erase(Ops.begin()+Idx);
2375 Ops.append(Add->op_begin(), Add->op_end());
2376 DeletedAdd = true;
2377 }
2378 
2379 // If we deleted at least one add, we added operands to the end of the list,
2380 // and they are not necessarily sorted. Recurse to re-sort and re-simplify
2381 // any operands we just acquired.
2382 if (DeletedAdd)
2383 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2384 }
2385 
2386 // Skip over the add expression until we get to a multiply.
2387 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2388 ++Idx;
2389 
2390 // Check to see if there are any folding opportunities present with
2391 // operands multiplied by constant values.
2392 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2393 uint64_t BitWidth = getTypeSizeInBits(Ty);
2394 DenseMap<const SCEV *, APInt> M;
2395 SmallVector<const SCEV *, 8> NewOps;
2396 APInt AccumulatedConstant(BitWidth, 0);
2397 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2398 Ops.data(), Ops.size(),
2399 APInt(BitWidth, 1), *this)) {
2400 struct APIntCompare {
2401 bool operator()(const APInt &LHS, const APInt &RHS) const {
2402 return LHS.ult(RHS);
2403 }
2404 };
2405 
2406 // Some interesting folding opportunity is present, so it's worthwhile to
2407 // re-generate the operands list. Group the operands by constant scale,
2408 // to avoid multiplying by the same constant scale multiple times.
2409 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2410 for (const SCEV *NewOp : NewOps)
2411 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2412 // Re-generate the operands list.
2413 Ops.clear();
2414 if (AccumulatedConstant != 0)
2415 Ops.push_back(getConstant(AccumulatedConstant));
2416 for (auto &MulOp : MulOpLists)
2417 if (MulOp.first != 0)
2418 Ops.push_back(getMulExpr(
2419 getConstant(MulOp.first),
2420 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2421 SCEV::FlagAnyWrap, Depth + 1));
2422 if (Ops.empty())
2423 return getZero(Ty);
2424 if (Ops.size() == 1)
2425 return Ops[0];
2426 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2427 }
2428 }
2429 
2430 // If we are adding something to a multiply expression, make sure the
2431 // something is not already an operand of the multiply. If so, merge it into
2432 // the multiply.
2433 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2434 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2435 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2436 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2437 if (isa<SCEVConstant>(MulOpSCEV))
2438 continue;
2439 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2440 if (MulOpSCEV == Ops[AddOp]) {
2441 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2442 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2443 if (Mul->getNumOperands() != 2) {
2444 // If the multiply has more than two operands, we must get the
2445 // Y*Z term.
2446 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2447 Mul->op_begin()+MulOp);
2448 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2449 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2450 }
2451 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2452 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2453 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2454 SCEV::FlagAnyWrap, Depth + 1);
2455 if (Ops.size() == 2) return OuterMul;
2456 if (AddOp < Idx) {
2457 Ops.erase(Ops.begin()+AddOp);
2458 Ops.erase(Ops.begin()+Idx-1);
2459 } else {
2460 Ops.erase(Ops.begin()+Idx);
2461 Ops.erase(Ops.begin()+AddOp-1);
2462 }
2463 Ops.push_back(OuterMul);
2464 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2465 }
2466 
2467 // Check this multiply against other multiplies being added together.
2468 for (unsigned OtherMulIdx = Idx+1; 2469 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2470 ++OtherMulIdx) { 2471 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2472 // If MulOp occurs in OtherMul, we can fold the two multiplies 2473 // together. 2474 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2475 OMulOp != e; ++OMulOp) 2476 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2477 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2478 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2479 if (Mul->getNumOperands() != 2) { 2480 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2481 Mul->op_begin()+MulOp); 2482 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2483 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2484 } 2485 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2486 if (OtherMul->getNumOperands() != 2) { 2487 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2488 OtherMul->op_begin()+OMulOp); 2489 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2490 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2491 } 2492 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2493 const SCEV *InnerMulSum = 2494 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2495 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2496 SCEV::FlagAnyWrap, Depth + 1); 2497 if (Ops.size() == 2) return OuterMul; 2498 Ops.erase(Ops.begin()+Idx); 2499 Ops.erase(Ops.begin()+OtherMulIdx-1); 2500 Ops.push_back(OuterMul); 2501 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2502 } 2503 } 2504 } 2505 } 2506 2507 // If there are any add recurrences in the operands list, see if any other 2508 // added values are loop invariant. If so, we can fold them into the 2509 // recurrence. 2510 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2511 ++Idx; 2512 2513 // Scan over all recurrences, trying to fold loop invariants into them. 2514 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2515 // Scan all of the other operands to this add and add them to the vector if 2516 // they are loop invariant w.r.t. the recurrence. 2517 SmallVector<const SCEV *, 8> LIOps; 2518 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2519 const Loop *AddRecLoop = AddRec->getLoop(); 2520 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2521 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2522 LIOps.push_back(Ops[i]); 2523 Ops.erase(Ops.begin()+i); 2524 --i; --e; 2525 } 2526 2527 // If we found some loop invariants, fold them into the recurrence. 2528 if (!LIOps.empty()) { 2529 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2530 LIOps.push_back(AddRec->getStart()); 2531 2532 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2533 AddRec->op_end()); 2534 // This follows from the fact that the no-wrap flags on the outer add 2535 // expression are applicable on the 0th iteration, when the add recurrence 2536 // will be equal to its start value. 2537 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2538 2539 // Build the new addrec. Propagate the NUW and NSW flags if both the 2540 // outer add and the inner addrec are guaranteed to have no overflow. 2541 // Always propagate NW. 2542 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2543 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2544 2545 // If all of the other operands were loop invariant, we are done. 
2546 if (Ops.size() == 1) return NewRec;
2547 
2548 // Otherwise, add the folded AddRec to the non-invariant parts.
2549 for (unsigned i = 0;; ++i)
2550 if (Ops[i] == AddRec) {
2551 Ops[i] = NewRec;
2552 break;
2553 }
2554 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2555 }
2556 
2557 // Okay, if there weren't any loop invariants to be folded, check to see if
2558 // there are multiple AddRecs with the same loop induction variable being
2559 // added together. If so, we can fold them.
2560 for (unsigned OtherIdx = Idx+1;
2561 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2562 ++OtherIdx) {
2563 // We expect the AddRecExprs to be sorted in reverse dominance order,
2564 // so that the 1st found AddRecExpr is dominated by all others.
2565 assert(DT.dominates(
2566 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2567 AddRec->getLoop()->getHeader()) &&
2568 "AddRecExprs are not sorted in reverse dominance order?");
2569 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2570 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2571 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2572 AddRec->op_end());
2573 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2574 ++OtherIdx) {
2575 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2576 if (OtherAddRec->getLoop() == AddRecLoop) {
2577 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2578 i != e; ++i) {
2579 if (i >= AddRecOps.size()) {
2580 AddRecOps.append(OtherAddRec->op_begin()+i,
2581 OtherAddRec->op_end());
2582 break;
2583 }
2584 SmallVector<const SCEV *, 2> TwoOps = {
2585 AddRecOps[i], OtherAddRec->getOperand(i)};
2586 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2587 }
2588 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2589 }
2590 }
2591 // Step size has changed, so we cannot guarantee no self-wraparound.
2592 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2593 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2594 }
2595 }
2596 
2597 // Otherwise couldn't fold anything into this recurrence. Move on to the
2598 // next one.
2599 }
2600 
2601 // Okay, it looks like we really DO need an add expr. Check to see if we
2602 // already have one, otherwise create a new one.
2603 return getOrCreateAddExpr(Ops, Flags);
2604 }
2605 
2606 const SCEV *
2607 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2608 SCEV::NoWrapFlags Flags) {
2609 FoldingSetNodeID ID;
2610 ID.AddInteger(scAddExpr);
2611 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2612 ID.AddPointer(Ops[i]);
2613 void *IP = nullptr;
2614 SCEVAddExpr *S =
2615 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2616 if (!S) {
2617 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2618 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2619 S = new (SCEVAllocator)
2620 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2621 UniqueSCEVs.InsertNode(S, IP);
2622 }
2623 S->setNoWrapFlags(Flags);
2624 return S;
2625 }
2626 
2627 const SCEV *
2628 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2629 SCEV::NoWrapFlags Flags) {
2630 FoldingSetNodeID ID;
2631 ID.AddInteger(scMulExpr);
2632 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2633 ID.AddPointer(Ops[i]);
2634 void *IP = nullptr;
2635 SCEVMulExpr *S =
2636 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2637 if (!S) {
2638 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2639 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2640 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2641 O, Ops.size());
2642 UniqueSCEVs.InsertNode(S, IP);
2643 }
2644 S->setNoWrapFlags(Flags);
2645 return S;
2646 }
2647 
2648 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2649 uint64_t k = i*j;
2650 if (j > 1 && k / j != i) Overflow = true;
2651 return k;
2652 }
2653 
2654 /// Compute the result of "n choose k", the binomial coefficient. If an
2655 /// intermediate computation overflows, Overflow will be set and the return will
2656 /// be garbage. Overflow is not cleared on absence of overflow.
2657 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2658 // We use the multiplicative formula:
2659 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2660 // At each iteration, we take the n-th term of the numerator and divide by the
2661 // (k-n)th term of the denominator. This division will always produce an
2662 // integral result, and helps reduce the chance of overflow in the
2663 // intermediate computations. However, we can still overflow even when the
2664 // final result would fit.
2665 
2666 if (n == 0 || n == k) return 1;
2667 if (k > n) return 0;
2668 
2669 if (k > n/2)
2670 k = n-k;
2671 
2672 uint64_t r = 1;
2673 for (uint64_t i = 1; i <= k; ++i) {
2674 r = umul_ov(r, n-(i-1), Overflow);
2675 r /= i;
2676 }
2677 return r;
2678 }
2679 
2680 /// Determine if any of the operands in this SCEV are a constant or if
2681 /// any of the add or multiply expressions in this SCEV contain a constant.
2682 static bool containsConstantSomewhere(const SCEV *StartExpr) {
2683 SmallVector<const SCEV *, 4> Ops;
2684 Ops.push_back(StartExpr);
2685 while (!Ops.empty()) {
2686 const SCEV *CurrentExpr = Ops.pop_back_val();
2687 if (isa<SCEVConstant>(*CurrentExpr))
2688 return true;
2689 
2690 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) {
2691 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr);
2692 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end());
2693 }
2694 }
2695 return false;
2696 }
2697 
2698 /// Get a canonical multiply expression, or something simpler if possible.
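/// For example, 2 * (3 + x) distributes to 6 + 2*x, and (-1) * {S,+,X}<L>
/// distributes across the recurrence to give {-S,+,-X}<L>.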
2699 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2700 SCEV::NoWrapFlags Flags,
2701 unsigned Depth) {
2702 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2703 "only nuw or nsw allowed");
2704 assert(!Ops.empty() && "Cannot get empty mul!");
2705 if (Ops.size() == 1) return Ops[0];
2706 #ifndef NDEBUG
2707 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2708 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2709 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2710 "SCEVMulExpr operand types don't match!");
2711 #endif
2712 
2713 // Sort by complexity; this groups all similar expression types together.
2714 GroupByComplexity(Ops, &LI, DT);
2715 
2716 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
2717 
2718 // Limit the recursion depth.
2719 if (Depth > MaxArithDepth)
2720 return getOrCreateMulExpr(Ops, Flags);
2721 
2722 // If there are any constants, fold them together.
2723 unsigned Idx = 0;
2724 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2725 
2726 // C1*(C2+V) -> C1*C2 + C1*V
2727 if (Ops.size() == 2)
2728 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
2729 // If any of Add's ops are Adds or Muls with a constant,
2730 // apply this transformation as well.
2731 if (Add->getNumOperands() == 2)
2732 if (containsConstantSomewhere(Add))
2733 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2734 SCEV::FlagAnyWrap, Depth + 1),
2735 getMulExpr(LHSC, Add->getOperand(1),
2736 SCEV::FlagAnyWrap, Depth + 1),
2737 SCEV::FlagAnyWrap, Depth + 1);
2738 
2739 ++Idx;
2740 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2741 // We found two constants, fold them together!
2742 ConstantInt *Fold =
2743 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
2744 Ops[0] = getConstant(Fold);
2745 Ops.erase(Ops.begin()+1); // Erase the folded element
2746 if (Ops.size() == 1) return Ops[0];
2747 LHSC = cast<SCEVConstant>(Ops[0]);
2748 }
2749 
2750 // If we are left with a constant one being multiplied, strip it off.
2751 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
2752 Ops.erase(Ops.begin());
2753 --Idx;
2754 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
2755 // If we have a multiply of zero, it will always be zero.
2756 return Ops[0];
2757 } else if (Ops[0]->isAllOnesValue()) {
2758 // If we have a mul by -1 of an add, try distributing the -1 among the
2759 // add operands.
2760 if (Ops.size() == 2) {
2761 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2762 SmallVector<const SCEV *, 4> NewOps;
2763 bool AnyFolded = false;
2764 for (const SCEV *AddOp : Add->operands()) {
2765 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2766 Depth + 1);
2767 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2768 NewOps.push_back(Mul);
2769 }
2770 if (AnyFolded)
2771 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
2772 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2773 // Negation preserves a recurrence's no self-wrap property.
2774 SmallVector<const SCEV *, 4> Operands;
2775 for (const SCEV *AddRecOp : AddRec->operands())
2776 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2777 Depth + 1));
2778 
2779 return getAddRecExpr(Operands, AddRec->getLoop(),
2780 AddRec->getNoWrapFlags(SCEV::FlagNW));
2781 }
2782 }
2783 }
2784 
2785 if (Ops.size() == 1)
2786 return Ops[0];
2787 }
2788 
2789 // Skip over the add expression until we get to a multiply.
2790 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2791 ++Idx;
2792 
2793 // If there are mul operands, inline them all into this expression.
2794 if (Idx < Ops.size()) {
2795 bool DeletedMul = false;
2796 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2797 if (Ops.size() > MulOpsInlineThreshold)
2798 break;
2799 // If we have a mul, expand the mul operands onto the end of the
2800 // operands list.
2801 Ops.erase(Ops.begin()+Idx);
2802 Ops.append(Mul->op_begin(), Mul->op_end());
2803 DeletedMul = true;
2804 }
2805 
2806 // If we deleted at least one mul, we added operands to the end of the
2807 // list, and they are not necessarily sorted. Recurse to re-sort and
2808 // re-simplify any operands we just acquired.
2809 if (DeletedMul)
2810 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2811 }
2812 
2813 // If there are any add recurrences in the operands list, see if any other
2814 // added values are loop invariant. If so, we can fold them into the
2815 // recurrence.
2816 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2817 ++Idx;
2818 
2819 // Scan over all recurrences, trying to fold loop invariants into them.
2820 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2821 // Scan all of the other operands to this mul and add them to the vector
2822 // if they are loop invariant w.r.t. the recurrence.
2823 SmallVector<const SCEV *, 8> LIOps;
2824 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2825 const Loop *AddRecLoop = AddRec->getLoop();
2826 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2827 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2828 LIOps.push_back(Ops[i]);
2829 Ops.erase(Ops.begin()+i);
2830 --i; --e;
2831 }
2832 
2833 // If we found some loop invariants, fold them into the recurrence.
2834 if (!LIOps.empty()) {
2835 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2836 SmallVector<const SCEV *, 4> NewOps;
2837 NewOps.reserve(AddRec->getNumOperands());
2838 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
2839 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2840 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2841 SCEV::FlagAnyWrap, Depth + 1));
2842 
2843 // Build the new addrec. Propagate the NUW and NSW flags if both the
2844 // outer mul and the inner addrec are guaranteed to have no overflow.
2845 //
2846 // No-self-wrap cannot be guaranteed after changing the step size, but
2847 // will be inferred if either NUW or NSW is true.
2848 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2849 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2850 
2851 // If all of the other operands were loop invariant, we are done.
2852 if (Ops.size() == 1) return NewRec;
2853 
2854 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2855 for (unsigned i = 0;; ++i)
2856 if (Ops[i] == AddRec) {
2857 Ops[i] = NewRec;
2858 break;
2859 }
2860 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2861 }
2862 
2863 // Okay, if there weren't any loop invariants to be folded, check to see
2864 // if there are multiple AddRecs with the same loop induction variable
2865 // being multiplied together. If so, we can fold them.
2866 
2867 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2868 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2869 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2870 // ]]],+,...up to x=2n}.
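// As a concrete affine instance:
// {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>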
2871 // Note that the arguments to choose() are always integers with values
2872 // known at compile time, never SCEV objects.
2873 //
2874 // The implementation avoids pointless extra computations when the two
2875 // addrecs are of different length (mathematically, it's equivalent to
2876 // an infinite stream of zeros on the right).
2877 bool OpsModified = false;
2878 for (unsigned OtherIdx = Idx+1;
2879 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2880 ++OtherIdx) {
2881 const SCEVAddRecExpr *OtherAddRec =
2882 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2883 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2884 continue;
2885 
2886 bool Overflow = false;
2887 Type *Ty = AddRec->getType();
2888 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2889 SmallVector<const SCEV*, 7> AddRecOps;
2890 for (int x = 0, xe = AddRec->getNumOperands() +
2891 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2892 const SCEV *Term = getZero(Ty);
2893 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2894 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2895 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2896 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2897 z < ze && !Overflow; ++z) {
2898 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2899 uint64_t Coeff;
2900 if (LargerThan64Bits)
2901 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2902 else
2903 Coeff = Coeff1*Coeff2;
2904 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2905 const SCEV *Term1 = AddRec->getOperand(y-z);
2906 const SCEV *Term2 = OtherAddRec->getOperand(z);
2907 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
2908 SCEV::FlagAnyWrap, Depth + 1),
2909 SCEV::FlagAnyWrap, Depth + 1);
2910 }
2911 }
2912 AddRecOps.push_back(Term);
2913 }
2914 if (!Overflow) {
2915 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2916 SCEV::FlagAnyWrap);
2917 if (Ops.size() == 2) return NewAddRec;
2918 Ops[Idx] = NewAddRec;
2919 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2920 OpsModified = true;
2921 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2922 if (!AddRec)
2923 break;
2924 }
2925 }
2926 if (OpsModified)
2927 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2928 
2929 // Otherwise couldn't fold anything into this recurrence. Move on to the
2930 // next one.
2931 }
2932 
2933 // Okay, it looks like we really DO need a mul expr. Check to see if we
2934 // already have one, otherwise create a new one.
2935 return getOrCreateMulExpr(Ops, Flags);
2936 }
2937 
2938 /// Get a canonical unsigned division expression, or something simpler if
2939 /// possible.
2940 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2941 const SCEV *RHS) {
2942 assert(getEffectiveSCEVType(LHS->getType()) ==
2943 getEffectiveSCEVType(RHS->getType()) &&
2944 "SCEVUDivExpr operand types don't match!");
2945 
2946 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2947 if (RHSC->getValue()->equalsInt(1))
2948 return LHS; // X udiv 1 --> X
2949 // If the denominator is zero, the result of the udiv is undefined. Don't
2950 // try to analyze it, because the resolution chosen here may differ from
2951 // the resolution chosen in other parts of the compiler.
2952 if (!RHSC->getValue()->isZero()) {
2953 // Determine if the division can be folded into the operands of
2954 // the LHS expression.
2955 // TODO: Generalize this to non-constants by using known-bits information.
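// The strategy below (roughly): zero-extend the candidate expressions to
// ExtTy, a type wide enough that the arithmetic provably cannot wrap, and
// check that the widened form still folds to the same shape; only then
// does the udiv distribute safely.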
2956 Type *Ty = LHS->getType();
2957 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
2958 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2959 // For non-power-of-two values, effectively round the value up to the
2960 // nearest power of two.
2961 if (!RHSC->getAPInt().isPowerOf2())
2962 ++MaxShiftAmt;
2963 IntegerType *ExtTy =
2964 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2965 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2966 if (const SCEVConstant *Step =
2967 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2968 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2969 const APInt &StepInt = Step->getAPInt();
2970 const APInt &DivInt = RHSC->getAPInt();
2971 if (!StepInt.urem(DivInt) &&
2972 getZeroExtendExpr(AR, ExtTy) ==
2973 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2974 getZeroExtendExpr(Step, ExtTy),
2975 AR->getLoop(), SCEV::FlagAnyWrap)) {
2976 SmallVector<const SCEV *, 4> Operands;
2977 for (const SCEV *Op : AR->operands())
2978 Operands.push_back(getUDivExpr(Op, RHS));
2979 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
2980 }
2981 // Get a canonical UDivExpr for a recurrence.
2982 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2983 // We can currently only fold X%N if X is constant.
2984 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2985 if (StartC && !DivInt.urem(StepInt) &&
2986 getZeroExtendExpr(AR, ExtTy) ==
2987 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2988 getZeroExtendExpr(Step, ExtTy),
2989 AR->getLoop(), SCEV::FlagAnyWrap)) {
2990 const APInt &StartInt = StartC->getAPInt();
2991 const APInt &StartRem = StartInt.urem(StepInt);
2992 if (StartRem != 0)
2993 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2994 AR->getLoop(), SCEV::FlagNW);
2995 }
2996 }
2997 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
2998 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2999 SmallVector<const SCEV *, 4> Operands;
3000 for (const SCEV *Op : M->operands())
3001 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3002 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3003 // Find an operand that's safely divisible.
3004 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3005 const SCEV *Op = M->getOperand(i);
3006 const SCEV *Div = getUDivExpr(Op, RHSC);
3007 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3008 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
3009 M->op_end());
3010 Operands[i] = Div;
3011 return getMulExpr(Operands);
3012 }
3013 }
3014 }
3015 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3016 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3017 SmallVector<const SCEV *, 4> Operands;
3018 for (const SCEV *Op : A->operands())
3019 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3020 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3021 Operands.clear();
3022 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3023 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3024 if (isa<SCEVUDivExpr>(Op) ||
3025 getMulExpr(Op, RHS) != A->getOperand(i))
3026 break;
3027 Operands.push_back(Op);
3028 }
3029 if (Operands.size() == A->getNumOperands())
3030 return getAddExpr(Operands);
3031 }
3032 }
3033 
3034 // Fold if both operands are constant.
3035 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3036 Constant *LHSCV = LHSC->getValue(); 3037 Constant *RHSCV = RHSC->getValue(); 3038 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3039 RHSCV))); 3040 } 3041 } 3042 } 3043 3044 FoldingSetNodeID ID; 3045 ID.AddInteger(scUDivExpr); 3046 ID.AddPointer(LHS); 3047 ID.AddPointer(RHS); 3048 void *IP = nullptr; 3049 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3050 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3051 LHS, RHS); 3052 UniqueSCEVs.InsertNode(S, IP); 3053 return S; 3054 } 3055 3056 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3057 APInt A = C1->getAPInt().abs(); 3058 APInt B = C2->getAPInt().abs(); 3059 uint32_t ABW = A.getBitWidth(); 3060 uint32_t BBW = B.getBitWidth(); 3061 3062 if (ABW > BBW) 3063 B = B.zext(ABW); 3064 else if (ABW < BBW) 3065 A = A.zext(BBW); 3066 3067 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3068 } 3069 3070 /// Get a canonical unsigned division expression, or something simpler if 3071 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3072 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3073 /// it's not exact because the udiv may be clearing bits. 3074 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3075 const SCEV *RHS) { 3076 // TODO: we could try to find factors in all sorts of things, but for now we 3077 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3078 // end of this file for inspiration. 3079 3080 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3081 if (!Mul || !Mul->hasNoUnsignedWrap()) 3082 return getUDivExpr(LHS, RHS); 3083 3084 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3085 // If the mulexpr multiplies by a constant, then that constant must be the 3086 // first element of the mulexpr. 3087 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3088 if (LHSCst == RHSCst) { 3089 SmallVector<const SCEV *, 2> Operands; 3090 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3091 return getMulExpr(Operands); 3092 } 3093 3094 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3095 // that there's a factor provided by one of the other terms. We need to 3096 // check. 3097 APInt Factor = gcd(LHSCst, RHSCst); 3098 if (!Factor.isIntN(1)) { 3099 LHSCst = 3100 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3101 RHSCst = 3102 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3103 SmallVector<const SCEV *, 2> Operands; 3104 Operands.push_back(LHSCst); 3105 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3106 LHS = getMulExpr(Operands); 3107 RHS = RHSCst; 3108 Mul = dyn_cast<SCEVMulExpr>(LHS); 3109 if (!Mul) 3110 return getUDivExactExpr(LHS, RHS); 3111 } 3112 } 3113 } 3114 3115 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3116 if (Mul->getOperand(i) == RHS) { 3117 SmallVector<const SCEV *, 2> Operands; 3118 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3119 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3120 return getMulExpr(Operands); 3121 } 3122 } 3123 3124 return getUDivExpr(LHS, RHS); 3125 } 3126 3127 /// Get an add recurrence expression for the specified loop. Simplify the 3128 /// expression as much as possible. 
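/// For example (illustrative), if Step is itself an addrec {A,+,B} on the
/// same loop L, the result folds to the higher-order recurrence
/// {Start,+,A,+,B}<L>.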
3129 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3130 const Loop *L, 3131 SCEV::NoWrapFlags Flags) { 3132 SmallVector<const SCEV *, 4> Operands; 3133 Operands.push_back(Start); 3134 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3135 if (StepChrec->getLoop() == L) { 3136 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3137 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3138 } 3139 3140 Operands.push_back(Step); 3141 return getAddRecExpr(Operands, L, Flags); 3142 } 3143 3144 /// Get an add recurrence expression for the specified loop. Simplify the 3145 /// expression as much as possible. 3146 const SCEV * 3147 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 3148 const Loop *L, SCEV::NoWrapFlags Flags) { 3149 if (Operands.size() == 1) return Operands[0]; 3150 #ifndef NDEBUG 3151 Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 3152 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 3153 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 3154 "SCEVAddRecExpr operand types don't match!"); 3155 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3156 assert(isLoopInvariant(Operands[i], L) && 3157 "SCEVAddRecExpr operand is not loop-invariant!"); 3158 #endif 3159 3160 if (Operands.back()->isZero()) { 3161 Operands.pop_back(); 3162 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X 3163 } 3164 3165 // It's tempting to call getMaxBackedgeTakenCount here and 3166 // use that information to infer NUW and NSW flags. However, computing a 3167 // BE count requires calling getAddRecExpr, so we may not yet have a 3168 // meaningful BE count at this point (and if we don't, we'd be stuck 3169 // with a SCEVCouldNotCompute as the cached BE count). 3170 3171 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 3172 3173 // Canonicalize nested AddRecs by nesting them in order of loop depth. 3174 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3175 const Loop *NestedLoop = NestedAR->getLoop(); 3176 if (L->contains(NestedLoop) 3177 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3178 : (!NestedLoop->contains(L) && 3179 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3180 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3181 NestedAR->op_end()); 3182 Operands[0] = NestedAR->getStart(); 3183 // AddRecs require their operands be loop-invariant with respect to their 3184 // loops. Don't perform this transformation if it would break this 3185 // requirement. 3186 bool AllInvariant = all_of( 3187 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3188 3189 if (AllInvariant) { 3190 // Create a recurrence for the outer loop with the same step size. 3191 // 3192 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3193 // inner recurrence has the same property. 3194 SCEV::NoWrapFlags OuterFlags = 3195 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3196 3197 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3198 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3199 return isLoopInvariant(Op, NestedLoop); 3200 }); 3201 3202 if (AllInvariant) { 3203 // Ok, both add recurrences are valid after the transformation. 3204 // 3205 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3206 // the outer recurrence has the same property.
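// (Illustrative example: with an outer loop Lo containing an inner loop Li,
// {{X,+,Y}<Li>,+,Z}<Lo> is rewritten here to {{X,+,Z}<Lo>,+,Y}<Li>,
// provided every operand is invariant in the loop it ends up attached to.)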
3207 SCEV::NoWrapFlags InnerFlags = 3208 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3209 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3210 } 3211 } 3212 // Reset Operands to its original state. 3213 Operands[0] = NestedAR; 3214 } 3215 } 3216 3217 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3218 // already have one, otherwise create a new one. 3219 FoldingSetNodeID ID; 3220 ID.AddInteger(scAddRecExpr); 3221 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3222 ID.AddPointer(Operands[i]); 3223 ID.AddPointer(L); 3224 void *IP = nullptr; 3225 SCEVAddRecExpr *S = 3226 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3227 if (!S) { 3228 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3229 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3230 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3231 O, Operands.size(), L); 3232 UniqueSCEVs.InsertNode(S, IP); 3233 } 3234 S->setNoWrapFlags(Flags); 3235 return S; 3236 } 3237 3238 const SCEV * 3239 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3240 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3241 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3242 // getSCEV(Base)->getType() has the same address space as Base->getType() 3243 // because SCEV::getType() preserves the address space. 3244 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3245 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3246 // instruction to its SCEV, because the Instruction may be guarded by control 3247 // flow and the no-overflow bits may not be valid for the expression in any 3248 // context. This can be fixed similarly to how these flags are handled for 3249 // adds. 3250 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3251 : SCEV::FlagAnyWrap; 3252 3253 const SCEV *TotalOffset = getZero(IntPtrTy); 3254 // The array size is unimportant. The first thing we do on CurTy is getting 3255 // its element type. 3256 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3257 for (const SCEV *IndexExpr : IndexExprs) { 3258 // Compute the (potentially symbolic) offset in bytes for this index. 3259 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3260 // For a struct, add the member offset. 3261 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3262 unsigned FieldNo = Index->getZExtValue(); 3263 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3264 3265 // Add the field offset to the running total offset. 3266 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3267 3268 // Update CurTy to the type of the field at Index. 3269 CurTy = STy->getTypeAtIndex(Index); 3270 } else { 3271 // Update CurTy to its element type. 3272 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3273 // For an array, add the element offset, explicitly scaled. 3274 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3275 // Getelementptr indices are signed. 3276 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3277 3278 // Multiply the index by the element size to compute the element offset. 3279 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3280 3281 // Add the element offset to the running total offset. 3282 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3283 } 3284 } 3285 3286 // Add the total offset from all the GEP indices to the base. 
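// (Worked example, illustrative only and assuming 64-bit pointers with a
// standard DataLayout: for "getelementptr [10 x i32], [10 x i32]* %p,
// i64 %i, i64 %j", the loop above accumulates %i * 40 + %j * 4, since
// sizeof([10 x i32]) is 40 bytes and sizeof(i32) is 4; the returned SCEV
// is %p plus that offset.)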
3287 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3288 } 3289 3290 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3291 const SCEV *RHS) { 3292 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3293 return getSMaxExpr(Ops); 3294 } 3295 3296 const SCEV * 3297 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3298 assert(!Ops.empty() && "Cannot get empty smax!"); 3299 if (Ops.size() == 1) return Ops[0]; 3300 #ifndef NDEBUG 3301 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3302 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3303 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3304 "SCEVSMaxExpr operand types don't match!"); 3305 #endif 3306 3307 // Sort by complexity, this groups all similar expression types together. 3308 GroupByComplexity(Ops, &LI, DT); 3309 3310 // If there are any constants, fold them together. 3311 unsigned Idx = 0; 3312 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3313 ++Idx; 3314 assert(Idx < Ops.size()); 3315 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3316 // We found two constants, fold them together! 3317 ConstantInt *Fold = ConstantInt::get( 3318 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3319 Ops[0] = getConstant(Fold); 3320 Ops.erase(Ops.begin()+1); // Erase the folded element 3321 if (Ops.size() == 1) return Ops[0]; 3322 LHSC = cast<SCEVConstant>(Ops[0]); 3323 } 3324 3325 // If we are left with a constant minimum-int, strip it off. 3326 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3327 Ops.erase(Ops.begin()); 3328 --Idx; 3329 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3330 // If we have an smax with a constant maximum-int, it will always be 3331 // maximum-int. 3332 return Ops[0]; 3333 } 3334 3335 if (Ops.size() == 1) return Ops[0]; 3336 } 3337 3338 // Find the first SMax 3339 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3340 ++Idx; 3341 3342 // Check to see if one of the operands is an SMax. If so, expand its operands 3343 // onto our operand list, and recurse to simplify. 3344 if (Idx < Ops.size()) { 3345 bool DeletedSMax = false; 3346 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3347 Ops.erase(Ops.begin()+Idx); 3348 Ops.append(SMax->op_begin(), SMax->op_end()); 3349 DeletedSMax = true; 3350 } 3351 3352 if (DeletedSMax) 3353 return getSMaxExpr(Ops); 3354 } 3355 3356 // Okay, check to see if the same value occurs in the operand list twice. If 3357 // so, delete one. Since we sorted the list, these values are required to 3358 // be adjacent. 3359 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3360 // X smax Y smax Y --> X smax Y 3361 // X smax Y --> X, if X is always greater than or equal to Y 3362 if (Ops[i] == Ops[i+1] || 3363 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3364 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3365 --i; --e; 3366 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3367 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3368 --i; --e; 3369 } 3370 3371 if (Ops.size() == 1) return Ops[0]; 3372 3373 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3374 3375 // Okay, it looks like we really DO need an smax expr. Check to see if we 3376 // already have one, otherwise create a new one.
3377 FoldingSetNodeID ID; 3378 ID.AddInteger(scSMaxExpr); 3379 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3380 ID.AddPointer(Ops[i]); 3381 void *IP = nullptr; 3382 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3383 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3384 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3385 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3386 O, Ops.size()); 3387 UniqueSCEVs.InsertNode(S, IP); 3388 return S; 3389 } 3390 3391 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3392 const SCEV *RHS) { 3393 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3394 return getUMaxExpr(Ops); 3395 } 3396 3397 const SCEV * 3398 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3399 assert(!Ops.empty() && "Cannot get empty umax!"); 3400 if (Ops.size() == 1) return Ops[0]; 3401 #ifndef NDEBUG 3402 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3403 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3404 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3405 "SCEVUMaxExpr operand types don't match!"); 3406 #endif 3407 3408 // Sort by complexity, this groups all similar expression types together. 3409 GroupByComplexity(Ops, &LI, DT); 3410 3411 // If there are any constants, fold them together. 3412 unsigned Idx = 0; 3413 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3414 ++Idx; 3415 assert(Idx < Ops.size()); 3416 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3417 // We found two constants, fold them together! 3418 ConstantInt *Fold = ConstantInt::get( 3419 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3420 Ops[0] = getConstant(Fold); 3421 Ops.erase(Ops.begin()+1); // Erase the folded element 3422 if (Ops.size() == 1) return Ops[0]; 3423 LHSC = cast<SCEVConstant>(Ops[0]); 3424 } 3425 3426 // If we are left with a constant minimum-int, strip it off. 3427 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3428 Ops.erase(Ops.begin()); 3429 --Idx; 3430 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3431 // If we have an umax with a constant maximum-int, it will always be 3432 // maximum-int. 3433 return Ops[0]; 3434 } 3435 3436 if (Ops.size() == 1) return Ops[0]; 3437 } 3438 3439 // Find the first UMax 3440 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3441 ++Idx; 3442 3443 // Check to see if one of the operands is a UMax. If so, expand its operands 3444 // onto our operand list, and recurse to simplify. 3445 if (Idx < Ops.size()) { 3446 bool DeletedUMax = false; 3447 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3448 Ops.erase(Ops.begin()+Idx); 3449 Ops.append(UMax->op_begin(), UMax->op_end()); 3450 DeletedUMax = true; 3451 } 3452 3453 if (DeletedUMax) 3454 return getUMaxExpr(Ops); 3455 } 3456 3457 // Okay, check to see if the same value occurs in the operand list twice. If 3458 // so, delete one. Since we sorted the list, these values are required to 3459 // be adjacent. 
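// (E.g., umax(X, X, Y) becomes umax(X, Y), and umax(X, Y) becomes just X
// when X >=u Y is known; illustrative only.)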
3460 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3461 // X umax Y umax Y --> X umax Y 3462 // X umax Y --> X, if X is always greater than or equal to Y 3463 if (Ops[i] == Ops[i+1] || 3464 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 3465 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3466 --i; --e; 3467 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 3468 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3469 --i; --e; 3470 } 3471 3472 if (Ops.size() == 1) return Ops[0]; 3473 3474 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3475 3476 // Okay, it looks like we really DO need a umax expr. Check to see if we 3477 // already have one, otherwise create a new one. 3478 FoldingSetNodeID ID; 3479 ID.AddInteger(scUMaxExpr); 3480 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3481 ID.AddPointer(Ops[i]); 3482 void *IP = nullptr; 3483 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3484 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3485 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3486 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3487 O, Ops.size()); 3488 UniqueSCEVs.InsertNode(S, IP); 3489 return S; 3490 } 3491 3492 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3493 const SCEV *RHS) { 3494 // ~smax(~x, ~y) == smin(x, y). 3495 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3496 } 3497 3498 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3499 const SCEV *RHS) { 3500 // ~umax(~x, ~y) == umin(x, y) 3501 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 3502 } 3503 3504 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3505 // We can bypass creating a target-independent 3506 // constant expression and then folding it back into a ConstantInt. 3507 // This is just a compile-time optimization. 3508 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3509 } 3510 3511 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3512 StructType *STy, 3513 unsigned FieldNo) { 3514 // We can bypass creating a target-independent 3515 // constant expression and then folding it back into a ConstantInt. 3516 // This is just a compile-time optimization. 3517 return getConstant( 3518 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3519 } 3520 3521 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3522 // Don't attempt to do anything other than create a SCEVUnknown object 3523 // here. createSCEV only calls getUnknown after checking for all other 3524 // interesting possibilities, and any other code that calls getUnknown 3525 // is doing so in order to hide a value from SCEV canonicalization. 3526 3527 FoldingSetNodeID ID; 3528 ID.AddInteger(scUnknown); 3529 ID.AddPointer(V); 3530 void *IP = nullptr; 3531 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3532 assert(cast<SCEVUnknown>(S)->getValue() == V && 3533 "Stale SCEVUnknown in uniquing map!"); 3534 return S; 3535 } 3536 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3537 FirstUnknown); 3538 FirstUnknown = cast<SCEVUnknown>(S); 3539 UniqueSCEVs.InsertNode(S, IP); 3540 return S; 3541 } 3542 3543 //===----------------------------------------------------------------------===// 3544 // Basic SCEV Analysis and PHI Idiom Recognition Code 3545 // 3546 3547 /// Test if values of the given type are analyzable within the SCEV 3548 /// framework.
This includes integer types and 3549 /// pointer types; the analysis always has DataLayout available, which 3550 /// provides the pointer-size information it needs. 3551 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3552 // Integers and pointers are always SCEVable. 3553 return Ty->isIntegerTy() || Ty->isPointerTy(); 3554 } 3555 3556 /// Return the size in bits of the specified type, for which isSCEVable must 3557 /// return true. 3558 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3559 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3560 return getDataLayout().getTypeSizeInBits(Ty); 3561 } 3562 3563 /// Return a type with the same bitwidth as the given type and which represents 3564 /// how SCEV will treat the given type, for which isSCEVable must return 3565 /// true. For pointer types, this is the pointer-sized integer type. 3566 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3567 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3568 3569 if (Ty->isIntegerTy()) 3570 return Ty; 3571 3572 // The only other supported type is pointer. 3573 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3574 return getDataLayout().getIntPtrType(Ty); 3575 } 3576 3577 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3578 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; 3579 } 3580 3581 const SCEV *ScalarEvolution::getCouldNotCompute() { 3582 return CouldNotCompute.get(); 3583 } 3584 3585 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3586 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3587 auto *SU = dyn_cast<SCEVUnknown>(S); 3588 return SU && SU->getValue() == nullptr; 3589 }); 3590 3591 return !ContainsNulls; 3592 } 3593 3594 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3595 HasRecMapType::iterator I = HasRecMap.find(S); 3596 if (I != HasRecMap.end()) 3597 return I->second; 3598 3599 bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>); 3600 HasRecMap.insert({S, FoundAddRec}); 3601 return FoundAddRec; 3602 } 3603 3604 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3605 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3606 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3607 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3608 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3609 if (!Add) 3610 return {S, nullptr}; 3611 3612 if (Add->getNumOperands() != 2) 3613 return {S, nullptr}; 3614 3615 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3616 if (!ConstOp) 3617 return {S, nullptr}; 3618 3619 return {Add->getOperand(1), ConstOp->getValue()}; 3620 } 3621 3622 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3623 /// by the value and offset from any ValueOffsetPair in the set. 3624 SetVector<ScalarEvolution::ValueOffsetPair> * 3625 ScalarEvolution::getSCEVValues(const SCEV *S) { 3626 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3627 if (SI == ExprValueMap.end()) 3628 return nullptr; 3629 #ifndef NDEBUG 3630 if (VerifySCEVMap) { 3631 // Check there is no dangling Value in the set returned. 3632 for (const auto &VE : SI->second) 3633 assert(ValueExprMap.count(VE.first)); 3634 } 3635 #endif 3636 return &SI->second; 3637 } 3638 3639 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3640 /// cannot be used separately.
eraseValueFromMap should be used to remove 3641 /// V from ValueExprMap and ExprValueMap at the same time. 3642 void ScalarEvolution::eraseValueFromMap(Value *V) { 3643 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3644 if (I != ValueExprMap.end()) { 3645 const SCEV *S = I->second; 3646 // Remove {V, 0} from the set of ExprValueMap[S] 3647 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3648 SV->remove({V, nullptr}); 3649 3650 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3651 const SCEV *Stripped; 3652 ConstantInt *Offset; 3653 std::tie(Stripped, Offset) = splitAddExpr(S); 3654 if (Offset != nullptr) { 3655 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3656 SV->remove({V, Offset}); 3657 } 3658 ValueExprMap.erase(V); 3659 } 3660 } 3661 3662 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3663 /// create a new one. 3664 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3665 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3666 3667 const SCEV *S = getExistingSCEV(V); 3668 if (S == nullptr) { 3669 S = createSCEV(V); 3670 // During PHI resolution, it is possible to create two SCEVs for the same 3671 // V, so we must double-check whether V->S has been inserted into 3672 // ValueExprMap before inserting S->{V, 0} into ExprValueMap. 3673 std::pair<ValueExprMapType::iterator, bool> Pair = 3674 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3675 if (Pair.second) { 3676 ExprValueMap[S].insert({V, nullptr}); 3677 3678 // If S == Stripped + Offset, add Stripped -> {V, Offset} into 3679 // ExprValueMap. 3680 const SCEV *Stripped = S; 3681 ConstantInt *Offset = nullptr; 3682 std::tie(Stripped, Offset) = splitAddExpr(S); 3683 // If Stripped is a SCEVUnknown, don't bother to save 3684 // Stripped -> {V, offset}. It doesn't simplify anything and sometimes 3685 // even increases the complexity of the expansion code. 3686 // If V is a GetElementPtrInst, don't save Stripped -> {V, offset} 3687 // because it may generate add/sub instead of GEP in SCEV expansion.
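// (Illustrative example with hypothetical values %a and %b: if getSCEV(%a)
// is (4 + %b), we also record (%b) -> {%a, 4}, so the expander can later
// rematerialize (%b) as "%a - 4" instead of emitting fresh arithmetic.)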
3688 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3689 !isa<GetElementPtrInst>(V)) 3690 ExprValueMap[Stripped].insert({V, Offset}); 3691 } 3692 } 3693 return S; 3694 } 3695 3696 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3697 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3698 3699 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3700 if (I != ValueExprMap.end()) { 3701 const SCEV *S = I->second; 3702 if (checkValidity(S)) 3703 return S; 3704 eraseValueFromMap(V); 3705 forgetMemoizedResults(S); 3706 } 3707 return nullptr; 3708 } 3709 3710 /// Return a SCEV corresponding to -V = -1*V 3711 /// 3712 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3713 SCEV::NoWrapFlags Flags) { 3714 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3715 return getConstant( 3716 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3717 3718 Type *Ty = V->getType(); 3719 Ty = getEffectiveSCEVType(Ty); 3720 return getMulExpr( 3721 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3722 } 3723 3724 /// Return a SCEV corresponding to ~V = -1-V 3725 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3726 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3727 return getConstant( 3728 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3729 3730 Type *Ty = V->getType(); 3731 Ty = getEffectiveSCEVType(Ty); 3732 const SCEV *AllOnes = 3733 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3734 return getMinusSCEV(AllOnes, V); 3735 } 3736 3737 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3738 SCEV::NoWrapFlags Flags, 3739 unsigned Depth) { 3740 // Fast path: X - X --> 0. 3741 if (LHS == RHS) 3742 return getZero(LHS->getType()); 3743 3744 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3745 // makes it so that we cannot make much use of NUW. 3746 auto AddFlags = SCEV::FlagAnyWrap; 3747 const bool RHSIsNotMinSigned = 3748 !getSignedRangeMin(RHS).isMinSignedValue(); 3749 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3750 // Let M be the minimum representable signed value. Then (-1)*RHS 3751 // signed-wraps if and only if RHS is M. That can happen even for 3752 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3753 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3754 // (-1)*RHS, we need to prove that RHS != M. 3755 // 3756 // If LHS is non-negative and we know that LHS - RHS does not 3757 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3758 // either by proving that RHS > M or that LHS >= 0. 3759 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3760 AddFlags = SCEV::FlagNSW; 3761 } 3762 } 3763 3764 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3765 // RHS is NSW and LHS >= 0. 3766 // 3767 // The difficulty here is that the NSW flag may have been proven 3768 // relative to a loop that is to be found in a recurrence in LHS and 3769 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3770 // larger scope than intended. 3771 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3772 3773 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); 3774 } 3775 3776 const SCEV * 3777 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) { 3778 Type *SrcTy = V->getType(); 3779 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3780 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3781 "Cannot truncate or zero extend with non-integer arguments!"); 3782 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3783 return V; // No conversion 3784 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3785 return getTruncateExpr(V, Ty); 3786 return getZeroExtendExpr(V, Ty); 3787 } 3788 3789 const SCEV * 3790 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 3791 Type *Ty) { 3792 Type *SrcTy = V->getType(); 3793 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3794 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3795 "Cannot truncate or sign extend with non-integer arguments!"); 3796 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3797 return V; // No conversion 3798 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3799 return getTruncateExpr(V, Ty); 3800 return getSignExtendExpr(V, Ty); 3801 } 3802 3803 const SCEV * 3804 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 3805 Type *SrcTy = V->getType(); 3806 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3807 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3808 "Cannot noop or zero extend with non-integer arguments!"); 3809 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3810 "getNoopOrZeroExtend cannot truncate!"); 3811 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3812 return V; // No conversion 3813 return getZeroExtendExpr(V, Ty); 3814 } 3815 3816 const SCEV * 3817 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 3818 Type *SrcTy = V->getType(); 3819 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3820 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3821 "Cannot noop or sign extend with non-integer arguments!"); 3822 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3823 "getNoopOrSignExtend cannot truncate!"); 3824 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3825 return V; // No conversion 3826 return getSignExtendExpr(V, Ty); 3827 } 3828 3829 const SCEV * 3830 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 3831 Type *SrcTy = V->getType(); 3832 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3833 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3834 "Cannot noop or any extend with non-integer arguments!"); 3835 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3836 "getNoopOrAnyExtend cannot truncate!"); 3837 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3838 return V; // No conversion 3839 return getAnyExtendExpr(V, Ty); 3840 } 3841 3842 const SCEV * 3843 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 3844 Type *SrcTy = V->getType(); 3845 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 3846 (Ty->isIntegerTy() || Ty->isPointerTy()) && 3847 "Cannot truncate or noop with non-integer arguments!"); 3848 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 3849 "getTruncateOrNoop cannot extend!"); 3850 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3851 return V; // No conversion 3852 return getTruncateExpr(V, Ty); 3853 } 3854 3855 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 3856 const SCEV *RHS) { 3857 const SCEV *PromotedLHS = LHS; 3858 const SCEV
*PromotedRHS = RHS; 3859 3860 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3861 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3862 else 3863 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3864 3865 return getUMaxExpr(PromotedLHS, PromotedRHS); 3866 } 3867 3868 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 3869 const SCEV *RHS) { 3870 const SCEV *PromotedLHS = LHS; 3871 const SCEV *PromotedRHS = RHS; 3872 3873 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3874 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3875 else 3876 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3877 3878 return getUMinExpr(PromotedLHS, PromotedRHS); 3879 } 3880 3881 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 3882 // A pointer operand may evaluate to a nonpointer expression, such as null. 3883 if (!V->getType()->isPointerTy()) 3884 return V; 3885 3886 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 3887 return getPointerBase(Cast->getOperand()); 3888 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 3889 const SCEV *PtrOp = nullptr; 3890 for (const SCEV *NAryOp : NAry->operands()) { 3891 if (NAryOp->getType()->isPointerTy()) { 3892 // Cannot find the base of an expression with multiple pointer operands. 3893 if (PtrOp) 3894 return V; 3895 PtrOp = NAryOp; 3896 } 3897 } 3898 if (!PtrOp) 3899 return V; 3900 return getPointerBase(PtrOp); 3901 } 3902 return V; 3903 } 3904 3905 /// Push users of the given Instruction onto the given Worklist. 3906 static void 3907 PushDefUseChildren(Instruction *I, 3908 SmallVectorImpl<Instruction *> &Worklist) { 3909 // Push the def-use children onto the Worklist stack. 3910 for (User *U : I->users()) 3911 Worklist.push_back(cast<Instruction>(U)); 3912 } 3913 3914 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 3915 SmallVector<Instruction *, 16> Worklist; 3916 PushDefUseChildren(PN, Worklist); 3917 3918 SmallPtrSet<Instruction *, 8> Visited; 3919 Visited.insert(PN); 3920 while (!Worklist.empty()) { 3921 Instruction *I = Worklist.pop_back_val(); 3922 if (!Visited.insert(I).second) 3923 continue; 3924 3925 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 3926 if (It != ValueExprMap.end()) { 3927 const SCEV *Old = It->second; 3928 3929 // Short-circuit the def-use traversal if the symbolic name 3930 // ceases to appear in expressions. 3931 if (Old != SymName && !hasOperand(Old, SymName)) 3932 continue; 3933 3934 // SCEVUnknown for a PHI either means that it has an unrecognized 3935 // structure, it's a PHI that's in the process of being computed 3936 // by createNodeForPHI, or it's a single-value PHI. In the first case, 3937 // additional loop trip count information isn't going to change anything. 3938 // In the second case, createNodeForPHI will perform the necessary 3939 // updates on its own when it gets to that point. In the third, we do 3940 // want to forget the SCEVUnknown.
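// (The third case is a PHI other than PN whose cached expression is
// exactly SymName; the condition below erases it, along with all non-PHI
// users and any user whose cached SCEV is not a SCEVUnknown.)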
3941 if (!isa<PHINode>(I) || 3942 !isa<SCEVUnknown>(Old) || 3943 (I != PN && Old == SymName)) { 3944 eraseValueFromMap(It->first); 3945 forgetMemoizedResults(Old); 3946 } 3947 } 3948 3949 PushDefUseChildren(I, Worklist); 3950 } 3951 } 3952 3953 namespace { 3954 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 3955 public: 3956 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3957 ScalarEvolution &SE) { 3958 SCEVInitRewriter Rewriter(L, SE); 3959 const SCEV *Result = Rewriter.visit(S); 3960 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 3961 } 3962 3963 SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 3964 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 3965 3966 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 3967 if (!SE.isLoopInvariant(Expr, L)) 3968 Valid = false; 3969 return Expr; 3970 } 3971 3972 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 3973 // Only allow AddRecExprs for this loop. 3974 if (Expr->getLoop() == L) 3975 return Expr->getStart(); 3976 Valid = false; 3977 return Expr; 3978 } 3979 3980 bool isValid() { return Valid; } 3981 3982 private: 3983 const Loop *L; 3984 bool Valid; 3985 }; 3986 3987 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 3988 public: 3989 static const SCEV *rewrite(const SCEV *S, const Loop *L, 3990 ScalarEvolution &SE) { 3991 SCEVShiftRewriter Rewriter(L, SE); 3992 const SCEV *Result = Rewriter.visit(S); 3993 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 3994 } 3995 3996 SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 3997 : SCEVRewriteVisitor(SE), L(L), Valid(true) {} 3998 3999 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4000 // Unknown values must be loop-invariant for the shift to be valid. 4001 if (!SE.isLoopInvariant(Expr, L)) 4002 Valid = false; 4003 return Expr; 4004 } 4005 4006 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4007 if (Expr->getLoop() == L && Expr->isAffine()) 4008 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4009 Valid = false; 4010 return Expr; 4011 } 4012 bool isValid() { return Valid; } 4013 4014 private: 4015 const Loop *L; 4016 bool Valid; 4017 }; 4018 } // end anonymous namespace 4019 4020 SCEV::NoWrapFlags 4021 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4022 if (!AR->isAffine()) 4023 return SCEV::FlagAnyWrap; 4024 4025 typedef OverflowingBinaryOperator OBO; 4026 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4027 4028 if (!AR->hasNoSignedWrap()) { 4029 ConstantRange AddRecRange = getSignedRange(AR); 4030 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4031 4032 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4033 Instruction::Add, IncRange, OBO::NoSignedWrap); 4034 if (NSWRegion.contains(AddRecRange)) 4035 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4036 } 4037 4038 if (!AR->hasNoUnsignedWrap()) { 4039 ConstantRange AddRecRange = getUnsignedRange(AR); 4040 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4041 4042 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4043 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4044 if (NUWRegion.contains(AddRecRange)) 4045 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4046 } 4047 4048 return Result; 4049 } 4050 4051 namespace { 4052 /// Represents an abstract binary operation. This may exist as a 4053 /// normal instruction or constant expression, or may have been 4054 /// derived from an expression tree.
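/// For example (illustrative), the instruction "%x = add nsw i32 %a, %b"
/// maps to BinaryOp{Opcode=Add, LHS=%a, RHS=%b, IsNSW=true, IsNUW=false}
/// with Op pointing at the instruction itself.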
4055 struct BinaryOp { 4056 unsigned Opcode; 4057 Value *LHS; 4058 Value *RHS; 4059 bool IsNSW; 4060 bool IsNUW; 4061 4062 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4063 /// constant expression. 4064 Operator *Op; 4065 4066 explicit BinaryOp(Operator *Op) 4067 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4068 IsNSW(false), IsNUW(false), Op(Op) { 4069 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4070 IsNSW = OBO->hasNoSignedWrap(); 4071 IsNUW = OBO->hasNoUnsignedWrap(); 4072 } 4073 } 4074 4075 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4076 bool IsNUW = false) 4077 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW), 4078 Op(nullptr) {} 4079 }; 4080 } 4081 4082 4083 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4084 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4085 auto *Op = dyn_cast<Operator>(V); 4086 if (!Op) 4087 return None; 4088 4089 // Implementation detail: all the cleverness here should happen without 4090 // creating new SCEV expressions -- our caller knows tricks to avoid creating 4091 // SCEV expressions when possible, and we should not break that. 4092 4093 switch (Op->getOpcode()) { 4094 case Instruction::Add: 4095 case Instruction::Sub: 4096 case Instruction::Mul: 4097 case Instruction::UDiv: 4098 case Instruction::And: 4099 case Instruction::Or: 4100 case Instruction::AShr: 4101 case Instruction::Shl: 4102 return BinaryOp(Op); 4103 4104 case Instruction::Xor: 4105 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4106 // If the RHS of the xor is a signmask, then this is just an add. 4107 // Instcombine turns add of signmask into xor as a strength reduction step. 4108 if (RHSC->getValue().isSignMask()) 4109 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4110 return BinaryOp(Op); 4111 4112 case Instruction::LShr: 4113 // Turn a logical shift right by a constant into an unsigned divide. 4114 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4115 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4116 4117 // If the shift count is not less than the bitwidth, the result of 4118 // the shift is undefined. Don't try to analyze it, because the 4119 // resolution chosen here may differ from the resolution chosen in 4120 // other parts of the compiler. 4121 if (SA->getValue().ult(BitWidth)) { 4122 Constant *X = 4123 ConstantInt::get(SA->getContext(), 4124 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4125 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4126 } 4127 } 4128 return BinaryOp(Op); 4129 4130 case Instruction::ExtractValue: { 4131 auto *EVI = cast<ExtractValueInst>(Op); 4132 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4133 break; 4134 4135 auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand()); 4136 if (!CI) 4137 break; 4138 4139 if (auto *F = CI->getCalledFunction()) 4140 switch (F->getIntrinsicID()) { 4141 case Intrinsic::sadd_with_overflow: 4142 case Intrinsic::uadd_with_overflow: { 4143 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT)) 4144 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4145 CI->getArgOperand(1)); 4146 4147 // Now that we know that all uses of the arithmetic-result component of 4148 // CI are guarded by the overflow check, we can go ahead and pretend 4149 // that the arithmetic is non-overflowing.
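// (Illustrative: the value extracted from a fully guarded
// sadd.with.overflow(%a, %b) is treated below as "add nsw %a, %b", and the
// uadd form as "add nuw %a, %b".)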
4150 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 4151 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4152 CI->getArgOperand(1), /* IsNSW = */ true, 4153 /* IsNUW = */ false); 4154 else 4155 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4156 CI->getArgOperand(1), /* IsNSW = */ false, 4157 /* IsNUW*/ true); 4158 } 4159 4160 case Intrinsic::ssub_with_overflow: 4161 case Intrinsic::usub_with_overflow: 4162 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4163 CI->getArgOperand(1)); 4164 4165 case Intrinsic::smul_with_overflow: 4166 case Intrinsic::umul_with_overflow: 4167 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4168 CI->getArgOperand(1)); 4169 default: 4170 break; 4171 } 4172 } 4173 4174 default: 4175 break; 4176 } 4177 4178 return None; 4179 } 4180 4181 /// A helper function for createAddRecFromPHI to handle simple cases. 4182 /// 4183 /// This function tries to find an AddRec expression for the simplest (yet most 4184 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4185 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4186 /// technique for finding the AddRec expression. 4187 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4188 Value *BEValueV, 4189 Value *StartValueV) { 4190 const Loop *L = LI.getLoopFor(PN->getParent()); 4191 assert(L && L->getHeader() == PN->getParent()); 4192 assert(BEValueV && StartValueV); 4193 4194 auto BO = MatchBinaryOp(BEValueV, DT); 4195 if (!BO) 4196 return nullptr; 4197 4198 if (BO->Opcode != Instruction::Add) 4199 return nullptr; 4200 4201 const SCEV *Accum = nullptr; 4202 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4203 Accum = getSCEV(BO->RHS); 4204 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4205 Accum = getSCEV(BO->LHS); 4206 4207 if (!Accum) 4208 return nullptr; 4209 4210 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4211 if (BO->IsNUW) 4212 Flags = setFlags(Flags, SCEV::FlagNUW); 4213 if (BO->IsNSW) 4214 Flags = setFlags(Flags, SCEV::FlagNSW); 4215 4216 const SCEV *StartVal = getSCEV(StartValueV); 4217 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4218 4219 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4220 4221 // We can add Flags to the post-inc expression only if we 4222 // know that it is *undefined behavior* for BEValueV to 4223 // overflow. 4224 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4225 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4226 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4227 4228 return PHISCEV; 4229 } 4230 4231 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 4232 const Loop *L = LI.getLoopFor(PN->getParent()); 4233 if (!L || L->getHeader() != PN->getParent()) 4234 return nullptr; 4235 4236 // The loop may have multiple entrances or multiple exits; we can analyze 4237 // this phi as an addrec if it has a unique entry value and a unique 4238 // backedge value. 
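// For example (illustrative IR):
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw nsw i32 %iv, 1
// has the unique start value 0 and the unique backedge value %iv.next, and
// is ultimately analyzed as {0,+,1}<nuw><nsw> for the loop.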
4239 Value *BEValueV = nullptr, *StartValueV = nullptr; 4240 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4241 Value *V = PN->getIncomingValue(i); 4242 if (L->contains(PN->getIncomingBlock(i))) { 4243 if (!BEValueV) { 4244 BEValueV = V; 4245 } else if (BEValueV != V) { 4246 BEValueV = nullptr; 4247 break; 4248 } 4249 } else if (!StartValueV) { 4250 StartValueV = V; 4251 } else if (StartValueV != V) { 4252 StartValueV = nullptr; 4253 break; 4254 } 4255 } 4256 if (!BEValueV || !StartValueV) 4257 return nullptr; 4258 4259 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 4260 "PHI node already processed?"); 4261 4262 // First, try to find an AddRec expression without creating a fictitious 4263 // symbolic value for PN. 4264 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 4265 return S; 4266 4267 // Handle PHI node value symbolically. 4268 const SCEV *SymbolicName = getUnknown(PN); 4269 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 4270 4271 // Using this symbolic name for the PHI, analyze the value coming around 4272 // the back-edge. 4273 const SCEV *BEValue = getSCEV(BEValueV); 4274 4275 // NOTE: If BEValue is loop invariant, we know that the PHI node just 4276 // has a special value for the first iteration of the loop. 4277 4278 // If the value coming around the backedge is an add with the symbolic 4279 // value we just inserted, then we found a simple induction variable! 4280 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 4281 // If there is a single occurrence of the symbolic value, replace it 4282 // with a recurrence. 4283 unsigned FoundIndex = Add->getNumOperands(); 4284 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4285 if (Add->getOperand(i) == SymbolicName) 4286 if (FoundIndex == e) { 4287 FoundIndex = i; 4288 break; 4289 } 4290 4291 if (FoundIndex != Add->getNumOperands()) { 4292 // Create an add with everything but the specified operand. 4293 SmallVector<const SCEV *, 8> Ops; 4294 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4295 if (i != FoundIndex) 4296 Ops.push_back(Add->getOperand(i)); 4297 const SCEV *Accum = getAddExpr(Ops); 4298 4299 // This is not a valid addrec if the step amount is varying each 4300 // loop iteration, but is not itself an addrec in this loop. 4301 if (isLoopInvariant(Accum, L) || 4302 (isa<SCEVAddRecExpr>(Accum) && 4303 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 4304 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4305 4306 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 4307 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 4308 if (BO->IsNUW) 4309 Flags = setFlags(Flags, SCEV::FlagNUW); 4310 if (BO->IsNSW) 4311 Flags = setFlags(Flags, SCEV::FlagNSW); 4312 } 4313 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 4314 // If the increment is an inbounds GEP, then we know the address 4315 // space cannot be wrapped around. We cannot make any guarantee 4316 // about signed or unsigned overflow because pointers are 4317 // unsigned but we may have a negative index from the base 4318 // pointer. We can guarantee that no unsigned wrap occurs if the 4319 // indices form a positive value.
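// (Illustrative: for a pointer PHI %p whose backedge value is
// "%p.next = getelementptr inbounds i32, i32* %p, i64 %k", the recurrence
// below receives FlagNW, and additionally FlagNUW when the byte offset
// %k * 4 is known positive; %p and %k are hypothetical values.)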
4320 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 4321 Flags = setFlags(Flags, SCEV::FlagNW); 4322 4323 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 4324 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 4325 Flags = setFlags(Flags, SCEV::FlagNUW); 4326 } 4327 4328 // We cannot transfer nuw and nsw flags from subtraction 4329 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 4330 // for instance. 4331 } 4332 4333 const SCEV *StartVal = getSCEV(StartValueV); 4334 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4335 4336 // Okay, for the entire analysis of this edge we assumed the PHI 4337 // to be symbolic. We now need to go back and purge all of the 4338 // entries for the scalars that use the symbolic expression. 4339 forgetSymbolicName(PN, SymbolicName); 4340 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 4341 4342 // We can add Flags to the post-inc expression only if we 4343 // know that it is *undefined behavior* for BEValueV to 4344 // overflow. 4345 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 4346 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 4347 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 4348 4349 return PHISCEV; 4350 } 4351 } 4352 } else { 4353 // Otherwise, this could be a loop like this: 4354 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 4355 // In this case, j = {1,+,1} and BEValue is j. 4356 // Because the other in-value of i (0) fits the evolution of BEValue, 4357 // i really is an addrec evolution. 4358 // 4359 // We can generalize this by saying that i is the shifted value of BEValue 4360 // by one iteration: 4361 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 4362 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 4363 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this); 4364 if (Shifted != getCouldNotCompute() && 4365 Start != getCouldNotCompute()) { 4366 const SCEV *StartVal = getSCEV(StartValueV); 4367 if (Start == StartVal) { 4368 // Okay, for the entire analysis of this edge we assumed the PHI 4369 // to be symbolic. We now need to go back and purge all of the 4370 // entries for the scalars that use the symbolic expression. 4371 forgetSymbolicName(PN, SymbolicName); 4372 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 4373 return Shifted; 4374 } 4375 } 4376 } 4377 4378 // Remove the temporary PHI node SCEV that has been inserted while intending 4379 // to create an AddRecExpr for this PHI node. We cannot keep this temporary, 4380 // as it would prevent later (possibly simpler) SCEV expressions from being 4381 // added to the ValueExprMap. 4382 eraseValueFromMap(PN); 4383 4384 return nullptr; 4385 } 4386 4387 // Checks if the SCEV S is available at BB. S is considered available at BB 4388 // if S can be materialized at BB without introducing a fault.
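// For example (illustrative), an addrec for BB's loop or an enclosing loop
// is available, since it is just the "current" value of an induction
// variable, while a udiv is conservatively treated as unavailable:
// materializing it could introduce a division fault.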
4389 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 4390 BasicBlock *BB) { 4391 struct CheckAvailable { 4392 bool TraversalDone = false; 4393 bool Available = true; 4394 4395 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 4396 BasicBlock *BB = nullptr; 4397 DominatorTree &DT; 4398 4399 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 4400 : L(L), BB(BB), DT(DT) {} 4401 4402 bool setUnavailable() { 4403 TraversalDone = true; 4404 Available = false; 4405 return false; 4406 } 4407 4408 bool follow(const SCEV *S) { 4409 switch (S->getSCEVType()) { 4410 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend: 4411 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: 4412 // These expressions are available if their operand(s) are. 4413 return true; 4414 4415 case scAddRecExpr: { 4416 // We allow add recurrences for the loop BB is in, or for some 4417 // outer loop. This guarantees availability because the value of the 4418 // add recurrence at BB is simply the "current" value of the induction 4419 // variable. We can relax this in the future; for instance an add 4420 // recurrence on a sibling dominating loop is also available at BB. 4421 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 4422 if (L && (ARLoop == L || ARLoop->contains(L))) 4423 return true; 4424 4425 return setUnavailable(); 4426 } 4427 4428 case scUnknown: { 4429 // For SCEVUnknown, we check for simple dominance. 4430 const auto *SU = cast<SCEVUnknown>(S); 4431 Value *V = SU->getValue(); 4432 4433 if (isa<Argument>(V)) 4434 return false; 4435 4436 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 4437 return false; 4438 4439 return setUnavailable(); 4440 } 4441 4442 case scUDivExpr: 4443 case scCouldNotCompute: 4444 // We do not try to be smart about these at all. 4445 return setUnavailable(); 4446 } 4447 llvm_unreachable("switch should be fully covered!"); 4448 } 4449 4450 bool isDone() { return TraversalDone; } 4451 }; 4452 4453 CheckAvailable CA(L, BB, DT); 4454 SCEVTraversal<CheckAvailable> ST(CA); 4455 4456 ST.visitAll(S); 4457 return CA.Available; 4458 } 4459 4460 // Try to match a control flow sequence that branches out at BI and merges back 4461 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 4462 // match.
4463 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4464 Value *&C, Value *&LHS, Value *&RHS) { 4465 C = BI->getCondition(); 4466 4467 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4468 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4469 4470 if (!LeftEdge.isSingleEdge()) 4471 return false; 4472 4473 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4474 4475 Use &LeftUse = Merge->getOperandUse(0); 4476 Use &RightUse = Merge->getOperandUse(1); 4477 4478 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4479 LHS = LeftUse; 4480 RHS = RightUse; 4481 return true; 4482 } 4483 4484 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4485 LHS = RightUse; 4486 RHS = LeftUse; 4487 return true; 4488 } 4489 4490 return false; 4491 } 4492 4493 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4494 auto IsReachable = 4495 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 4496 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 4497 const Loop *L = LI.getLoopFor(PN->getParent()); 4498 4499 // We don't want to break LCSSA, even in a SCEV expression tree. 4500 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4501 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4502 return nullptr; 4503 4504 // Try to match 4505 // 4506 // br %cond, label %left, label %right 4507 // left: 4508 // br label %merge 4509 // right: 4510 // br label %merge 4511 // merge: 4512 // V = phi [ %x, %left ], [ %y, %right ] 4513 // 4514 // as "select %cond, %x, %y" 4515 4516 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4517 assert(IDom && "At least the entry block should dominate PN"); 4518 4519 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4520 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4521 4522 if (BI && BI->isConditional() && 4523 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4524 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4525 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4526 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4527 } 4528 4529 return nullptr; 4530 } 4531 4532 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4533 if (const SCEV *S = createAddRecFromPHI(PN)) 4534 return S; 4535 4536 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4537 return S; 4538 4539 // If the PHI has a single incoming value, follow that value, unless the 4540 // PHI's incoming blocks are in a different loop, in which case doing so 4541 // risks breaking LCSSA form. Instcombine would normally zap these, but 4542 // it doesn't have DominatorTree information, so it may miss cases. 4543 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 4544 if (LI.replacementPreservesLCSSAForm(PN, V)) 4545 return getSCEV(V); 4546 4547 // If it's not a loop phi, we can't handle it yet. 4548 return getUnknown(PN); 4549 } 4550 4551 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 4552 Value *Cond, 4553 Value *TrueVal, 4554 Value *FalseVal) { 4555 // Handle "constant" branch or select. This can occur for instance when a 4556 // loop pass transforms an inner loop and moves on to process the outer loop. 4557 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 4558 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 4559 4560 // Try to match some simple smax or umax patterns. 
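// (Illustrative: "(%a >s %b) ? %a : %b" becomes smax(%a, %b), and
// "(%a >s %b) ? %a+1 : %b+1" becomes smax(%a, %b)+1, since both arms
// differ from the compared values by the same amount.)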
4561 auto *ICI = dyn_cast<ICmpInst>(Cond); 4562 if (!ICI) 4563 return getUnknown(I); 4564 4565 Value *LHS = ICI->getOperand(0); 4566 Value *RHS = ICI->getOperand(1); 4567 4568 switch (ICI->getPredicate()) { 4569 case ICmpInst::ICMP_SLT: 4570 case ICmpInst::ICMP_SLE: 4571 std::swap(LHS, RHS); 4572 LLVM_FALLTHROUGH; 4573 case ICmpInst::ICMP_SGT: 4574 case ICmpInst::ICMP_SGE: 4575 // a >s b ? a+x : b+x -> smax(a, b)+x 4576 // a >s b ? b+x : a+x -> smin(a, b)+x 4577 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4578 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 4579 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 4580 const SCEV *LA = getSCEV(TrueVal); 4581 const SCEV *RA = getSCEV(FalseVal); 4582 const SCEV *LDiff = getMinusSCEV(LA, LS); 4583 const SCEV *RDiff = getMinusSCEV(RA, RS); 4584 if (LDiff == RDiff) 4585 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 4586 LDiff = getMinusSCEV(LA, RS); 4587 RDiff = getMinusSCEV(RA, LS); 4588 if (LDiff == RDiff) 4589 return getAddExpr(getSMinExpr(LS, RS), LDiff); 4590 } 4591 break; 4592 case ICmpInst::ICMP_ULT: 4593 case ICmpInst::ICMP_ULE: 4594 std::swap(LHS, RHS); 4595 LLVM_FALLTHROUGH; 4596 case ICmpInst::ICMP_UGT: 4597 case ICmpInst::ICMP_UGE: 4598 // a >u b ? a+x : b+x -> umax(a, b)+x 4599 // a >u b ? b+x : a+x -> umin(a, b)+x 4600 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 4601 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4602 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 4603 const SCEV *LA = getSCEV(TrueVal); 4604 const SCEV *RA = getSCEV(FalseVal); 4605 const SCEV *LDiff = getMinusSCEV(LA, LS); 4606 const SCEV *RDiff = getMinusSCEV(RA, RS); 4607 if (LDiff == RDiff) 4608 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 4609 LDiff = getMinusSCEV(LA, RS); 4610 RDiff = getMinusSCEV(RA, LS); 4611 if (LDiff == RDiff) 4612 return getAddExpr(getUMinExpr(LS, RS), LDiff); 4613 } 4614 break; 4615 case ICmpInst::ICMP_NE: 4616 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 4617 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4618 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4619 const SCEV *One = getOne(I->getType()); 4620 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4621 const SCEV *LA = getSCEV(TrueVal); 4622 const SCEV *RA = getSCEV(FalseVal); 4623 const SCEV *LDiff = getMinusSCEV(LA, LS); 4624 const SCEV *RDiff = getMinusSCEV(RA, One); 4625 if (LDiff == RDiff) 4626 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4627 } 4628 break; 4629 case ICmpInst::ICMP_EQ: 4630 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 4631 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 4632 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 4633 const SCEV *One = getOne(I->getType()); 4634 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 4635 const SCEV *LA = getSCEV(TrueVal); 4636 const SCEV *RA = getSCEV(FalseVal); 4637 const SCEV *LDiff = getMinusSCEV(LA, One); 4638 const SCEV *RDiff = getMinusSCEV(RA, LS); 4639 if (LDiff == RDiff) 4640 return getAddExpr(getUMaxExpr(One, LS), LDiff); 4641 } 4642 break; 4643 default: 4644 break; 4645 } 4646 4647 return getUnknown(I); 4648 } 4649 4650 /// Expand GEP instructions into add and multiply operations. This allows them 4651 /// to be analyzed by regular SCEV code. 4652 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 4653 // Don't attempt to analyze GEPs over unsized objects. 
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands' results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands' results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
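    //
    // Illustrative example (an assumption for exposition, not from the
    // original source): if the underlying value is known to be a multiple of
    // 8, KnownBits reports at least three trailing zero bits, so we return 3
    // and callers learn that 8 divides the value.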
4730 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 4731 return Known.countMinTrailingZeros(); 4732 } 4733 4734 // SCEVUDivExpr 4735 return 0; 4736 } 4737 4738 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 4739 auto I = MinTrailingZerosCache.find(S); 4740 if (I != MinTrailingZerosCache.end()) 4741 return I->second; 4742 4743 uint32_t Result = GetMinTrailingZerosImpl(S); 4744 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 4745 assert(InsertPair.second && "Should insert a new key"); 4746 return InsertPair.first->second; 4747 } 4748 4749 /// Helper method to assign a range to V from metadata present in the IR. 4750 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 4751 if (Instruction *I = dyn_cast<Instruction>(V)) 4752 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 4753 return getConstantRangeFromMetadata(*MD); 4754 4755 return None; 4756 } 4757 4758 /// Determine the range for a particular SCEV. If SignHint is 4759 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 4760 /// with a "cleaner" unsigned (resp. signed) representation. 4761 const ConstantRange & 4762 ScalarEvolution::getRangeRef(const SCEV *S, 4763 ScalarEvolution::RangeSignHint SignHint) { 4764 DenseMap<const SCEV *, ConstantRange> &Cache = 4765 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 4766 : SignedRanges; 4767 4768 // See if we've computed this range already. 4769 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 4770 if (I != Cache.end()) 4771 return I->second; 4772 4773 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 4774 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 4775 4776 unsigned BitWidth = getTypeSizeInBits(S->getType()); 4777 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 4778 4779 // If the value has known zeros, the maximum value will have those known zeros 4780 // as well. 
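  // For example (a worked sketch, not from the original source): with
  // BitWidth == 8 and TZ == 2, the unsigned case below yields the range
  // [0, 0xFD), i.e. a maximum value of 0xFC, since 0xFF.lshr(2).shl(2) is
  // 0xFC.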
4781 uint32_t TZ = GetMinTrailingZeros(S); 4782 if (TZ != 0) { 4783 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 4784 ConservativeResult = 4785 ConstantRange(APInt::getMinValue(BitWidth), 4786 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 4787 else 4788 ConservativeResult = ConstantRange( 4789 APInt::getSignedMinValue(BitWidth), 4790 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 4791 } 4792 4793 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 4794 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 4795 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 4796 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 4797 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 4798 } 4799 4800 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 4801 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 4802 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 4803 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 4804 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 4805 } 4806 4807 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 4808 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 4809 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 4810 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 4811 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 4812 } 4813 4814 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 4815 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 4816 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 4817 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 4818 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 4819 } 4820 4821 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 4822 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 4823 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 4824 return setRange(UDiv, SignHint, 4825 ConservativeResult.intersectWith(X.udiv(Y))); 4826 } 4827 4828 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 4829 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 4830 return setRange(ZExt, SignHint, 4831 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 4832 } 4833 4834 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 4835 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 4836 return setRange(SExt, SignHint, 4837 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 4838 } 4839 4840 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 4841 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 4842 return setRange(Trunc, SignHint, 4843 ConservativeResult.intersectWith(X.truncate(BitWidth))); 4844 } 4845 4846 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 4847 // If there's no unsigned wrap, the value will never be less than its 4848 // initial value. 4849 if (AddRec->hasNoUnsignedWrap()) 4850 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 4851 if (!C->getValue()->isZero()) 4852 ConservativeResult = ConservativeResult.intersectWith( 4853 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 4854 4855 // If there's no signed wrap, and all the operands have the same sign or 4856 // zero, the value won't ever change sign. 
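    // For instance (illustrative, not from the original source), an addrec
    // such as {1,+,2}<nsw> has all-nonnegative operands, so its range can be
    // intersected with the non-negative half [0, SINT_MAX] below.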
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt(BitWidth, 0),
                        APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth),
                        APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be
    // lifted if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(ConstantRange(Known.One,
                                                           ~Known.Zero + 1));
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take.  Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
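//
// A worked sketch (illustrative numbers, not from the original source): given
// StartRange == [0, 10), Step == 3, and MaxBECount == 5, the total Offset is
// 15, the moved upper boundary is 9 + 15 == 24, and the helper below returns
// [0, 25).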
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN.  Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset (i.e. Step * MaxBECount) is more than the full span of
  // BitWidth.  If it is, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // Offset is by how much the expression can change.  The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise.  The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around).  This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
  if (NewLower == NewUpper)
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // No overflow detected: return [StartLower, StartUpper + Offset + 1) when
  // increasing, or [StartLower - Offset, StartUpper + 1) when decreasing.
  return ConstantRange(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
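  //
  // For example (illustrative, not from the original source): if the signed
  // range of Step is [-1, 2), we compute one range using the most negative
  // step (-1, moving down) and one using the most positive step (1, moving
  // up), and union the two results below.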
5011 ConstantRange StartSRange = getSignedRange(Start); 5012 ConstantRange StepSRange = getSignedRange(Step); 5013 5014 // If Step can be both positive and negative, we need to find ranges for the 5015 // maximum absolute step values in both directions and union them. 5016 ConstantRange SR = 5017 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5018 MaxBECountValue, BitWidth, /* Signed = */ true); 5019 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5020 StartSRange, MaxBECountValue, 5021 BitWidth, /* Signed = */ true)); 5022 5023 // Next, consider step unsigned. 5024 ConstantRange UR = getRangeForAffineARHelper( 5025 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5026 MaxBECountValue, BitWidth, /* Signed = */ false); 5027 5028 // Finally, intersect signed and unsigned ranges. 5029 return SR.intersectWith(UR); 5030 } 5031 5032 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5033 const SCEV *Step, 5034 const SCEV *MaxBECount, 5035 unsigned BitWidth) { 5036 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5037 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5038 5039 struct SelectPattern { 5040 Value *Condition = nullptr; 5041 APInt TrueValue; 5042 APInt FalseValue; 5043 5044 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5045 const SCEV *S) { 5046 Optional<unsigned> CastOp; 5047 APInt Offset(BitWidth, 0); 5048 5049 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5050 "Should be!"); 5051 5052 // Peel off a constant offset: 5053 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5054 // In the future we could consider being smarter here and handle 5055 // {Start+Step,+,Step} too. 5056 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5057 return; 5058 5059 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5060 S = SA->getOperand(1); 5061 } 5062 5063 // Peel off a cast operation 5064 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5065 CastOp = SCast->getSCEVType(); 5066 S = SCast->getOperand(); 5067 } 5068 5069 using namespace llvm::PatternMatch; 5070 5071 auto *SU = dyn_cast<SCEVUnknown>(S); 5072 const APInt *TrueVal, *FalseVal; 5073 if (!SU || 5074 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5075 m_APInt(FalseVal)))) { 5076 Condition = nullptr; 5077 return; 5078 } 5079 5080 TrueValue = *TrueVal; 5081 FalseValue = *FalseVal; 5082 5083 // Re-apply the cast we peeled off earlier 5084 if (CastOp.hasValue()) 5085 switch (*CastOp) { 5086 default: 5087 llvm_unreachable("Unknown SCEV cast type!"); 5088 5089 case scTruncate: 5090 TrueValue = TrueValue.trunc(BitWidth); 5091 FalseValue = FalseValue.trunc(BitWidth); 5092 break; 5093 case scZeroExtend: 5094 TrueValue = TrueValue.zext(BitWidth); 5095 FalseValue = FalseValue.zext(BitWidth); 5096 break; 5097 case scSignExtend: 5098 TrueValue = TrueValue.sext(BitWidth); 5099 FalseValue = FalseValue.sext(BitWidth); 5100 break; 5101 } 5102 5103 // Re-apply the constant offset we peeled off earlier 5104 TrueValue += Offset; 5105 FalseValue += Offset; 5106 } 5107 5108 bool isRecognized() { return Condition != nullptr; } 5109 }; 5110 5111 SelectPattern StartPattern(*this, BitWidth, Start); 5112 if (!StartPattern.isRecognized()) 5113 return ConstantRange(BitWidth, /* isFullSet = */ true); 5114 5115 SelectPattern StepPattern(*this, BitWidth, Step); 5116 if (!StepPattern.isRecognized()) 5117 return ConstantRange(BitWidth, /* isFullSet = */ true); 5118 5119 if (StartPattern.Condition != StepPattern.Condition) { 5120 // 
We don't handle this case today; but we could, by considering four 5121 // possibilities below instead of two. I'm not sure if there are cases where 5122 // that will help over what getRange already does, though. 5123 return ConstantRange(BitWidth, /* isFullSet = */ true); 5124 } 5125 5126 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5127 // construct arbitrary general SCEV expressions here. This function is called 5128 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5129 // say) can end up caching a suboptimal value. 5130 5131 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5132 // C2352 and C2512 (otherwise it isn't needed). 5133 5134 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5135 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5136 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5137 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5138 5139 ConstantRange TrueRange = 5140 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5141 ConstantRange FalseRange = 5142 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5143 5144 return TrueRange.unionWith(FalseRange); 5145 } 5146 5147 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5148 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5149 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5150 5151 // Return early if there are no flags to propagate to the SCEV. 5152 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5153 if (BinOp->hasNoUnsignedWrap()) 5154 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5155 if (BinOp->hasNoSignedWrap()) 5156 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5157 if (Flags == SCEV::FlagAnyWrap) 5158 return SCEV::FlagAnyWrap; 5159 5160 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5161 } 5162 5163 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5164 // Here we check that I is in the header of the innermost loop containing I, 5165 // since we only deal with instructions in the loop header. The actual loop we 5166 // need to check later will come from an add recurrence, but getting that 5167 // requires computing the SCEV of the operands, which can be expensive. This 5168 // check we can do cheaply to rule out some cases early. 5169 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5170 if (InnermostContainingLoop == nullptr || 5171 InnermostContainingLoop->getHeader() != I->getParent()) 5172 return false; 5173 5174 // Only proceed if we can prove that I does not yield poison. 5175 if (!programUndefinedIfFullPoison(I)) 5176 return false; 5177 5178 // At this point we know that if I is executed, then it does not wrap 5179 // according to at least one of NSW or NUW. If I is not executed, then we do 5180 // not know if the calculation that I represents would wrap. Multiple 5181 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5182 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5183 // derived from other instructions that map to the same SCEV. We cannot make 5184 // that guarantee for cases where I is not executed. So we need to find the 5185 // loop that I is considered in relation to and prove that I is executed for 5186 // every iteration of that loop. 
That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison).  If the
  // result of the add recurrence is fed into the loop latch condition and the
  // loop does not contain any throws or exiting blocks other than the latch,
  // we now have the ability to "choose" whether the backedge is taken or not
  // (by choosing a sufficiently evil value for the poison feeding into the
  // branch) for every iteration including and after the one in which \p I
  // first became poison.  There are two possibilities (let's call K the
  // iteration in which \p I first becomes poison):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects.  In this case, executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect.  In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
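  //
  // As a sketch of the propagation below (illustrative, not from the original
  // source): if I feeds an `add`, which feeds the `icmp` used by the
  // conditional branch in the latch block, the add and icmp are pushed as
  // poison in turn, and the branch is then found to be control dependent on
  // poison.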
5257 Pushed.insert(I); 5258 PoisonStack.push_back(I); 5259 5260 bool LatchControlDependentOnPoison = false; 5261 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5262 const Instruction *Poison = PoisonStack.pop_back_val(); 5263 5264 for (auto *PoisonUser : Poison->users()) { 5265 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5266 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5267 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5268 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5269 assert(BI->isConditional() && "Only possibility!"); 5270 if (BI->getParent() == LatchBB) { 5271 LatchControlDependentOnPoison = true; 5272 break; 5273 } 5274 } 5275 } 5276 } 5277 5278 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5279 } 5280 5281 ScalarEvolution::LoopProperties 5282 ScalarEvolution::getLoopProperties(const Loop *L) { 5283 typedef ScalarEvolution::LoopProperties LoopProperties; 5284 5285 auto Itr = LoopPropertiesCache.find(L); 5286 if (Itr == LoopPropertiesCache.end()) { 5287 auto HasSideEffects = [](Instruction *I) { 5288 if (auto *SI = dyn_cast<StoreInst>(I)) 5289 return !SI->isSimple(); 5290 5291 return I->mayHaveSideEffects(); 5292 }; 5293 5294 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5295 /*HasNoSideEffects*/ true}; 5296 5297 for (auto *BB : L->getBlocks()) 5298 for (auto &I : *BB) { 5299 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5300 LP.HasNoAbnormalExits = false; 5301 if (HasSideEffects(&I)) 5302 LP.HasNoSideEffects = false; 5303 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5304 break; // We're already as pessimistic as we can get. 5305 } 5306 5307 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5308 assert(InsertPair.second && "We just checked!"); 5309 Itr = InsertPair.first; 5310 } 5311 5312 return Itr->second; 5313 } 5314 5315 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5316 if (!isSCEVable(V->getType())) 5317 return getUnknown(V); 5318 5319 if (Instruction *I = dyn_cast<Instruction>(V)) { 5320 // Don't attempt to analyze instructions in blocks that aren't 5321 // reachable. Such instructions don't matter, and they aren't required 5322 // to obey basic rules for definitions dominating uses which this 5323 // analysis depends on. 5324 if (!DT.isReachableFromEntry(I->getParent())) 5325 return getUnknown(V); 5326 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5327 return getConstant(CI); 5328 else if (isa<ConstantPointerNull>(V)) 5329 return getZero(V->getType()); 5330 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5331 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5332 else if (!isa<ConstantExpr>(V)) 5333 return getUnknown(V); 5334 5335 Operator *U = cast<Operator>(V); 5336 if (auto BO = MatchBinaryOp(U, DT)) { 5337 switch (BO->Opcode) { 5338 case Instruction::Add: { 5339 // The simple thing to do would be to just call getSCEV on both operands 5340 // and call getAddExpr with the result. However if we're looking at a 5341 // bunch of things all added together, this can be quite inefficient, 5342 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5343 // Instead, gather up all the operands and make a single getAddExpr call. 5344 // LLVM IR canonical form means we need only traverse the left operands. 
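      //
      // For example (illustrative, not from the original source), the tree
      // for (((a + b) + c) + d) is walked once, gathering {d, c, b, a} into
      // AddOps and issuing a single getAddExpr call for all four operands.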
5345 SmallVector<const SCEV *, 4> AddOps; 5346 do { 5347 if (BO->Op) { 5348 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5349 AddOps.push_back(OpSCEV); 5350 break; 5351 } 5352 5353 // If a NUW or NSW flag can be applied to the SCEV for this 5354 // addition, then compute the SCEV for this addition by itself 5355 // with a separate call to getAddExpr. We need to do that 5356 // instead of pushing the operands of the addition onto AddOps, 5357 // since the flags are only known to apply to this particular 5358 // addition - they may not apply to other additions that can be 5359 // formed with operands from AddOps. 5360 const SCEV *RHS = getSCEV(BO->RHS); 5361 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5362 if (Flags != SCEV::FlagAnyWrap) { 5363 const SCEV *LHS = getSCEV(BO->LHS); 5364 if (BO->Opcode == Instruction::Sub) 5365 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5366 else 5367 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5368 break; 5369 } 5370 } 5371 5372 if (BO->Opcode == Instruction::Sub) 5373 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5374 else 5375 AddOps.push_back(getSCEV(BO->RHS)); 5376 5377 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5378 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5379 NewBO->Opcode != Instruction::Sub)) { 5380 AddOps.push_back(getSCEV(BO->LHS)); 5381 break; 5382 } 5383 BO = NewBO; 5384 } while (true); 5385 5386 return getAddExpr(AddOps); 5387 } 5388 5389 case Instruction::Mul: { 5390 SmallVector<const SCEV *, 4> MulOps; 5391 do { 5392 if (BO->Op) { 5393 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5394 MulOps.push_back(OpSCEV); 5395 break; 5396 } 5397 5398 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5399 if (Flags != SCEV::FlagAnyWrap) { 5400 MulOps.push_back( 5401 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5402 break; 5403 } 5404 } 5405 5406 MulOps.push_back(getSCEV(BO->RHS)); 5407 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5408 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5409 MulOps.push_back(getSCEV(BO->LHS)); 5410 break; 5411 } 5412 BO = NewBO; 5413 } while (true); 5414 5415 return getMulExpr(MulOps); 5416 } 5417 case Instruction::UDiv: 5418 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5419 case Instruction::Sub: { 5420 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5421 if (BO->Op) 5422 Flags = getNoWrapFlagsFromUB(BO->Op); 5423 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5424 } 5425 case Instruction::And: 5426 // For an expression like x&255 that merely masks off the high bits, 5427 // use zext(trunc(x)) as the SCEV expression. 5428 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5429 if (CI->isNullValue()) 5430 return getSCEV(BO->RHS); 5431 if (CI->isAllOnesValue()) 5432 return getSCEV(BO->LHS); 5433 const APInt &A = CI->getValue(); 5434 5435 // Instcombine's ShrinkDemandedConstant may strip bits out of 5436 // constants, obscuring what would otherwise be a low-bits mask. 5437 // Use computeKnownBits to compute what ShrinkDemandedConstant 5438 // knew about to reconstruct a low-bits mask value. 
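        //
        // A worked sketch (illustrative, not from the original source): for
        // "x & 0xF8" on i8, LZ == 0 and TZ == 3, so MulCount == 8 and the
        // result below is (zext i5 (trunc (x /u 8)) to i8) * 8.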
5439 unsigned LZ = A.countLeadingZeros(); 5440 unsigned TZ = A.countTrailingZeros(); 5441 unsigned BitWidth = A.getBitWidth(); 5442 KnownBits Known(BitWidth); 5443 computeKnownBits(BO->LHS, Known, getDataLayout(), 5444 0, &AC, nullptr, &DT); 5445 5446 APInt EffectiveMask = 5447 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5448 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5449 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5450 const SCEV *LHS = getSCEV(BO->LHS); 5451 const SCEV *ShiftedLHS = nullptr; 5452 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5453 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5454 // For an expression like (x * 8) & 8, simplify the multiply. 5455 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5456 unsigned GCD = std::min(MulZeros, TZ); 5457 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5458 SmallVector<const SCEV*, 4> MulOps; 5459 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5460 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5461 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5462 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5463 } 5464 } 5465 if (!ShiftedLHS) 5466 ShiftedLHS = getUDivExpr(LHS, MulCount); 5467 return getMulExpr( 5468 getZeroExtendExpr( 5469 getTruncateExpr(ShiftedLHS, 5470 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5471 BO->LHS->getType()), 5472 MulCount); 5473 } 5474 } 5475 break; 5476 5477 case Instruction::Or: 5478 // If the RHS of the Or is a constant, we may have something like: 5479 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5480 // optimizations will transparently handle this case. 5481 // 5482 // In order for this transformation to be safe, the LHS must be of the 5483 // form X*(2^n) and the Or constant must be less than 2^n. 5484 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5485 const SCEV *LHS = getSCEV(BO->LHS); 5486 const APInt &CIVal = CI->getValue(); 5487 if (GetMinTrailingZeros(LHS) >= 5488 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5489 // Build a plain add SCEV. 5490 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5491 // If the LHS of the add was an addrec and it has no-wrap flags, 5492 // transfer the no-wrap flags, since an or won't introduce a wrap. 5493 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5494 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5495 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5496 OldAR->getNoWrapFlags()); 5497 } 5498 return S; 5499 } 5500 } 5501 break; 5502 5503 case Instruction::Xor: 5504 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5505 // If the RHS of xor is -1, then this is a not operation. 5506 if (CI->isAllOnesValue()) 5507 return getNotSCEV(getSCEV(BO->LHS)); 5508 5509 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5510 // This is a variant of the check for xor with -1, and it handles 5511 // the case where instcombine has trimmed non-demanded bits out 5512 // of an xor with -1. 
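        //
        // For instance (illustrative, not from the original source): if
        // "x & 255" was modeled as (zext i8 (trunc x)), then xor'ing that
        // value with 255 is modeled below as (zext i8 (~(trunc x))).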
5513 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5514 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5515 if (LBO->getOpcode() == Instruction::And && 5516 LCI->getValue() == CI->getValue()) 5517 if (const SCEVZeroExtendExpr *Z = 5518 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5519 Type *UTy = BO->LHS->getType(); 5520 const SCEV *Z0 = Z->getOperand(); 5521 Type *Z0Ty = Z0->getType(); 5522 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5523 5524 // If C is a low-bits mask, the zero extend is serving to 5525 // mask off the high bits. Complement the operand and 5526 // re-apply the zext. 5527 if (CI->getValue().isMask(Z0TySize)) 5528 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5529 5530 // If C is a single bit, it may be in the sign-bit position 5531 // before the zero-extend. In this case, represent the xor 5532 // using an add, which is equivalent, and re-apply the zext. 5533 APInt Trunc = CI->getValue().trunc(Z0TySize); 5534 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5535 Trunc.isSignMask()) 5536 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5537 UTy); 5538 } 5539 } 5540 break; 5541 5542 case Instruction::Shl: 5543 // Turn shift left of a constant amount into a multiply. 5544 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5545 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5546 5547 // If the shift count is not less than the bitwidth, the result of 5548 // the shift is undefined. Don't try to analyze it, because the 5549 // resolution chosen here may differ from the resolution chosen in 5550 // other parts of the compiler. 5551 if (SA->getValue().uge(BitWidth)) 5552 break; 5553 5554 // It is currently not resolved how to interpret NSW for left 5555 // shift by BitWidth - 1, so we avoid applying flags in that 5556 // case. Remove this check (or this comment) once the situation 5557 // is resolved. See 5558 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5559 // and http://reviews.llvm.org/D8890 . 5560 auto Flags = SCEV::FlagAnyWrap; 5561 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5562 Flags = getNoWrapFlagsFromUB(BO->Op); 5563 5564 Constant *X = ConstantInt::get(getContext(), 5565 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5566 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5567 } 5568 break; 5569 5570 case Instruction::AShr: 5571 // AShr X, C, where C is a constant. 5572 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 5573 if (!CI) 5574 break; 5575 5576 Type *OuterTy = BO->LHS->getType(); 5577 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 5578 // If the shift count is not less than the bitwidth, the result of 5579 // the shift is undefined. Don't try to analyze it, because the 5580 // resolution chosen here may differ from the resolution chosen in 5581 // other parts of the compiler. 5582 if (CI->getValue().uge(BitWidth)) 5583 break; 5584 5585 if (CI->isNullValue()) 5586 return getSCEV(BO->LHS); // shift by zero --> noop 5587 5588 uint64_t AShrAmt = CI->getZExtValue(); 5589 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 5590 5591 Operator *L = dyn_cast<Operator>(BO->LHS); 5592 if (L && L->getOpcode() == Instruction::Shl) { 5593 // X = Shl A, n 5594 // Y = AShr X, m 5595 // Both n and m are constant. 5596 5597 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 5598 if (L->getOperand(1) == BO->RHS) 5599 // For a two-shift sext-inreg, i.e. n = m, 5600 // use sext(trunc(x)) as the SCEV expression. 
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression.  We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through.  Since
    // createNodeForSelectOrPHI only works for a condition that is an
    // `ICmpInst`, and constant expressions cannot have instructions as
    // operands, we'd have returned getUnknown for a select constant
    // expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}



//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible.  This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (this is also the case if the trip count is simply constant;
/// use getSmallConstantTripCount for that case).  It will also return 1 if
/// the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases.  Returns the greatest power of
    // two divisor.  If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Get the expression for the number of loop iterations for which this loop
/// is guaranteed not to exit via ExitingBlock.  Otherwise return
/// SCEVCouldNotCompute.
5770 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 5771 BasicBlock *ExitingBlock) { 5772 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 5773 } 5774 5775 const SCEV * 5776 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 5777 SCEVUnionPredicate &Preds) { 5778 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 5779 } 5780 5781 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 5782 return getBackedgeTakenInfo(L).getExact(this); 5783 } 5784 5785 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 5786 /// known never to be less than the actual backedge taken count. 5787 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 5788 return getBackedgeTakenInfo(L).getMax(this); 5789 } 5790 5791 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 5792 return getBackedgeTakenInfo(L).isMaxOrZero(this); 5793 } 5794 5795 /// Push PHI nodes in the header of the given loop onto the given Worklist. 5796 static void 5797 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 5798 BasicBlock *Header = L->getHeader(); 5799 5800 // Push all Loop-header PHIs onto the Worklist stack. 5801 for (BasicBlock::iterator I = Header->begin(); 5802 PHINode *PN = dyn_cast<PHINode>(I); ++I) 5803 Worklist.push_back(PN); 5804 } 5805 5806 const ScalarEvolution::BackedgeTakenInfo & 5807 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 5808 auto &BTI = getBackedgeTakenInfo(L); 5809 if (BTI.hasFullInfo()) 5810 return BTI; 5811 5812 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 5813 5814 if (!Pair.second) 5815 return Pair.first->second; 5816 5817 BackedgeTakenInfo Result = 5818 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 5819 5820 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 5821 } 5822 5823 const ScalarEvolution::BackedgeTakenInfo & 5824 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 5825 // Initially insert an invalid entry for this loop. If the insertion 5826 // succeeds, proceed to actually compute a backedge-taken count and 5827 // update the value. The temporary CouldNotCompute value tells SCEV 5828 // code elsewhere that it shouldn't attempt to request a new 5829 // backedge-taken count, which could result in infinite recursion. 5830 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 5831 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 5832 if (!Pair.second) 5833 return Pair.first->second; 5834 5835 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 5836 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 5837 // must be cleared in this scope. 5838 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 5839 5840 if (Result.getExact(this) != getCouldNotCompute()) { 5841 assert(isLoopInvariant(Result.getExact(this), L) && 5842 isLoopInvariant(Result.getMax(this), L) && 5843 "Computed backedge-taken count isn't loop invariant for loop!"); 5844 ++NumTripCountsComputed; 5845 } 5846 else if (Result.getMax(this) == getCouldNotCompute() && 5847 isa<PHINode>(L->getHeader()->begin())) { 5848 // Only count loops that have phi nodes as not being computable. 
    ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information.  This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI.  In the former case, additional loop trip
        // count information isn't going to change anything.  In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
  RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  PushLoopPHIs(L, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }

  // Forget all contained loops too, to avoid dangling entries in the
  // ValuesAtScopes map.
  for (Loop *I : *L)
    forgetLoop(I);

  LoopPropertiesCache.erase(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}

/// Get the exact loop backedge taken count considering all loop exits.  A
/// computable result can only be returned for loops with a single exit.
/// Returning the minimum taken count among all exits is incorrect because one
/// of the loop's exit limits may have been skipped.  howFarToZero assumes
/// that the limit of each loop test is never skipped.  This is a valid
/// assumption as long as the loop exits via that test.  For precise results,
/// it is the caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const SCEV *BECount = nullptr;
  for (auto &ENT : ExitNotTaken) {
    assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");

    if (!BECount)
      BECount = ENT.ExactNotTaken;
    else if (BECount != ENT.ExactNotTaken)
      return SE->getCouldNotCompute();
    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  assert(BECount && "Invalid not taken count for loop exit");
  return BECount;
}

/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
6015 const SCEV * 6016 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6017 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6018 return !ENT.hasAlwaysTruePredicate(); 6019 }; 6020 6021 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6022 return SE->getCouldNotCompute(); 6023 6024 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6025 "No point in having a non-constant max backedge taken count!"); 6026 return getMax(); 6027 } 6028 6029 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6030 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6031 return !ENT.hasAlwaysTruePredicate(); 6032 }; 6033 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6034 } 6035 6036 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6037 ScalarEvolution *SE) const { 6038 if (getMax() && getMax() != SE->getCouldNotCompute() && 6039 SE->hasOperand(getMax(), S)) 6040 return true; 6041 6042 for (auto &ENT : ExitNotTaken) 6043 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6044 SE->hasOperand(ENT.ExactNotTaken, S)) 6045 return true; 6046 6047 return false; 6048 } 6049 6050 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6051 : ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) { 6052 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6053 isa<SCEVConstant>(MaxNotTaken)) && 6054 "No point in having a non-constant max backedge taken count!"); 6055 } 6056 6057 ScalarEvolution::ExitLimit::ExitLimit( 6058 const SCEV *E, const SCEV *M, bool MaxOrZero, 6059 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6060 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6061 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6062 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6063 "Exact is not allowed to be less precise than Max"); 6064 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6065 isa<SCEVConstant>(MaxNotTaken)) && 6066 "No point in having a non-constant max backedge taken count!"); 6067 for (auto *PredSet : PredSetList) 6068 for (auto *P : *PredSet) 6069 addPredicate(P); 6070 } 6071 6072 ScalarEvolution::ExitLimit::ExitLimit( 6073 const SCEV *E, const SCEV *M, bool MaxOrZero, 6074 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6075 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6076 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6077 isa<SCEVConstant>(MaxNotTaken)) && 6078 "No point in having a non-constant max backedge taken count!"); 6079 } 6080 6081 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6082 bool MaxOrZero) 6083 : ExitLimit(E, M, MaxOrZero, None) { 6084 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6085 isa<SCEVConstant>(MaxNotTaken)) && 6086 "No point in having a non-constant max backedge taken count!"); 6087 } 6088 6089 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6090 /// computable exit into a persistent ExitNotTakenInfo array. 
6091 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6092 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6093 &&ExitCounts, 6094 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6095 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6096 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 6097 ExitNotTaken.reserve(ExitCounts.size()); 6098 std::transform( 6099 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6100 [&](const EdgeExitInfo &EEI) { 6101 BasicBlock *ExitBB = EEI.first; 6102 const ExitLimit &EL = EEI.second; 6103 if (EL.Predicates.empty()) 6104 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6105 6106 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6107 for (auto *Pred : EL.Predicates) 6108 Predicate->add(Pred); 6109 6110 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6111 }); 6112 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6113 "No point in having a non-constant max backedge taken count!"); 6114 } 6115 6116 /// Invalidate this result and free the ExitNotTakenInfo array. 6117 void ScalarEvolution::BackedgeTakenInfo::clear() { 6118 ExitNotTaken.clear(); 6119 } 6120 6121 /// Compute the number of times the backedge of the specified loop will execute. 6122 ScalarEvolution::BackedgeTakenInfo 6123 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6124 bool AllowPredicates) { 6125 SmallVector<BasicBlock *, 8> ExitingBlocks; 6126 L->getExitingBlocks(ExitingBlocks); 6127 6128 typedef ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo EdgeExitInfo; 6129 6130 SmallVector<EdgeExitInfo, 4> ExitCounts; 6131 bool CouldComputeBECount = true; 6132 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6133 const SCEV *MustExitMaxBECount = nullptr; 6134 const SCEV *MayExitMaxBECount = nullptr; 6135 bool MustExitMaxOrZero = false; 6136 6137 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6138 // and compute maxBECount. 6139 // Do a union of all the predicates here. 6140 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6141 BasicBlock *ExitBB = ExitingBlocks[i]; 6142 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6143 6144 assert((AllowPredicates || EL.Predicates.empty()) && 6145 "Predicated exit limit when predicates are not allowed!"); 6146 6147 // 1. For each exit that can be computed, add an entry to ExitCounts. 6148 // CouldComputeBECount is true only if all exits can be computed. 6149 if (EL.ExactNotTaken == getCouldNotCompute()) 6150 // We couldn't compute an exact value for this exit, so 6151 // we won't be able to compute an exact value for the loop. 6152 CouldComputeBECount = false; 6153 else 6154 ExitCounts.emplace_back(ExitBB, EL); 6155 6156 // 2. Derive the loop's MaxBECount from each exit's max number of 6157 // non-exiting iterations. Partition the loop exits into two kinds: 6158 // LoopMustExits and LoopMayExits. 6159 // 6160 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 6161 // is a LoopMayExit. If any computable LoopMustExit is found, then 6162 // MaxBECount is the minimum EL.MaxNotTaken of computable 6163 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6164 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6165 // computable EL.MaxNotTaken. 
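    // For example (illustrative): in "for (i = 0; i != n; ++i) { if (f()) break; }"
    // the "i != n" latch test dominates the latch, so its bound is a
    // LoopMustExit bound and MaxBECount takes the minimum over such exits;
    // the early "break" is only a LoopMayExit and is ignored once a
    // computable LoopMustExit exists.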
6166 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6167 DT.dominates(ExitBB, Latch)) { 6168 if (!MustExitMaxBECount) { 6169 MustExitMaxBECount = EL.MaxNotTaken; 6170 MustExitMaxOrZero = EL.MaxOrZero; 6171 } else { 6172 MustExitMaxBECount = 6173 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6174 } 6175 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6176 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6177 MayExitMaxBECount = EL.MaxNotTaken; 6178 else { 6179 MayExitMaxBECount = 6180 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6181 } 6182 } 6183 } 6184 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6185 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6186 // The loop backedge will be taken the maximum or zero times if there's 6187 // a single exit that must be taken the maximum or zero times. 6188 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6189 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6190 MaxBECount, MaxOrZero); 6191 } 6192 6193 ScalarEvolution::ExitLimit 6194 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6195 bool AllowPredicates) { 6196 6197 // Okay, we've chosen an exiting block. See what condition causes us to exit 6198 // at this block and remember the exit block and whether all other targets 6199 // lead to the loop header. 6200 bool MustExecuteLoopHeader = true; 6201 BasicBlock *Exit = nullptr; 6202 for (auto *SBB : successors(ExitingBlock)) 6203 if (!L->contains(SBB)) { 6204 if (Exit) // Multiple exit successors. 6205 return getCouldNotCompute(); 6206 Exit = SBB; 6207 } else if (SBB != L->getHeader()) { 6208 MustExecuteLoopHeader = false; 6209 } 6210 6211 // At this point, we know we have a conditional branch that determines whether 6212 // the loop is exited. However, we don't know if the branch is executed each 6213 // time through the loop. If not, then the execution count of the branch will 6214 // not be equal to the trip count of the loop. 6215 // 6216 // Currently we check for this by checking to see if the Exit branch goes to 6217 // the loop header. If so, we know it will always execute the same number of 6218 // times as the loop. We also handle the case where the exit block *is* the 6219 // loop header. This is common for un-rotated loops. 6220 // 6221 // If both of those tests fail, walk up the unique predecessor chain to the 6222 // header, stopping if there is an edge that doesn't exit the loop. If the 6223 // header is reached, the execution count of the branch will be equal to the 6224 // trip count of the loop. 6225 // 6226 // More extensive analysis could be done to handle more cases here. 6227 // 6228 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6229 // The simple checks failed, try climbing the unique predecessor chain 6230 // up to the header. 6231 bool Ok = false; 6232 for (BasicBlock *BB = ExitingBlock; BB; ) { 6233 BasicBlock *Pred = BB->getUniquePredecessor(); 6234 if (!Pred) 6235 return getCouldNotCompute(); 6236 TerminatorInst *PredTerm = Pred->getTerminator(); 6237 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6238 if (PredSucc == BB) 6239 continue; 6240 // If the predecessor has a successor that isn't BB and isn't 6241 // outside the loop, assume the worst. 
6242 if (L->contains(PredSucc)) 6243 return getCouldNotCompute(); 6244 } 6245 if (Pred == L->getHeader()) { 6246 Ok = true; 6247 break; 6248 } 6249 BB = Pred; 6250 } 6251 if (!Ok) 6252 return getCouldNotCompute(); 6253 } 6254 6255 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6256 TerminatorInst *Term = ExitingBlock->getTerminator(); 6257 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6258 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6259 // Proceed to the next level to examine the exit condition expression. 6260 return computeExitLimitFromCond( 6261 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6262 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6263 } 6264 6265 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6266 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6267 /*ControlsExit=*/IsOnlyExit); 6268 6269 return getCouldNotCompute(); 6270 } 6271 6272 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6273 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6274 bool ControlsExit, bool AllowPredicates) { 6275 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6276 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6277 ControlsExit, AllowPredicates); 6278 } 6279 6280 Optional<ScalarEvolution::ExitLimit> 6281 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6282 BasicBlock *TBB, BasicBlock *FBB, 6283 bool ControlsExit, bool AllowPredicates) { 6284 (void)this->L; 6285 (void)this->TBB; 6286 (void)this->FBB; 6287 (void)this->AllowPredicates; 6288 6289 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6290 this->AllowPredicates == AllowPredicates && 6291 "Variance in assumed invariant key components!"); 6292 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6293 if (Itr == TripCountMap.end()) 6294 return None; 6295 return Itr->second; 6296 } 6297 6298 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6299 BasicBlock *TBB, BasicBlock *FBB, 6300 bool ControlsExit, 6301 bool AllowPredicates, 6302 const ExitLimit &EL) { 6303 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6304 this->AllowPredicates == AllowPredicates && 6305 "Variance in assumed invariant key components!"); 6306 6307 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6308 assert(InsertResult.second && "Expected successful insertion!"); 6309 (void)InsertResult; 6310 } 6311 6312 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6313 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6314 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6315 6316 if (auto MaybeEL = 6317 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6318 return *MaybeEL; 6319 6320 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6321 ControlsExit, AllowPredicates); 6322 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6323 return EL; 6324 } 6325 6326 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6327 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6328 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6329 // Check if the controlling expression for this loop is an And or Or. 6330 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6331 if (BO->getOpcode() == Instruction::And) { 6332 // Recurse on the operands of the and. 
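      // For example (illustrative): if the branch exits the loop when
      // "%a != %n && %b != %m" is false, then either operand becoming false
      // exits the loop, and the exact backedge-taken count below is the umin
      // of the operands' counts when both are computable.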
6333 bool EitherMayExit = L->contains(TBB); 6334 ExitLimit EL0 = computeExitLimitFromCondCached( 6335 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6336 AllowPredicates); 6337 ExitLimit EL1 = computeExitLimitFromCondCached( 6338 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6339 AllowPredicates); 6340 const SCEV *BECount = getCouldNotCompute(); 6341 const SCEV *MaxBECount = getCouldNotCompute(); 6342 if (EitherMayExit) { 6343 // Both conditions must be true for the loop to continue executing. 6344 // Choose the less conservative count. 6345 if (EL0.ExactNotTaken == getCouldNotCompute() || 6346 EL1.ExactNotTaken == getCouldNotCompute()) 6347 BECount = getCouldNotCompute(); 6348 else 6349 BECount = 6350 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6351 if (EL0.MaxNotTaken == getCouldNotCompute()) 6352 MaxBECount = EL1.MaxNotTaken; 6353 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6354 MaxBECount = EL0.MaxNotTaken; 6355 else 6356 MaxBECount = 6357 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6358 } else { 6359 // Both conditions must be true at the same time for the loop to exit. 6360 // For now, be conservative. 6361 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6362 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6363 MaxBECount = EL0.MaxNotTaken; 6364 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6365 BECount = EL0.ExactNotTaken; 6366 } 6367 6368 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6369 // to be more aggressive when computing BECount than when computing 6370 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6371 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6372 // to not. 6373 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6374 !isa<SCEVCouldNotCompute>(BECount)) 6375 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6376 6377 return ExitLimit(BECount, MaxBECount, false, 6378 {&EL0.Predicates, &EL1.Predicates}); 6379 } 6380 if (BO->getOpcode() == Instruction::Or) { 6381 // Recurse on the operands of the or. 6382 bool EitherMayExit = L->contains(FBB); 6383 ExitLimit EL0 = computeExitLimitFromCondCached( 6384 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6385 AllowPredicates); 6386 ExitLimit EL1 = computeExitLimitFromCondCached( 6387 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6388 AllowPredicates); 6389 const SCEV *BECount = getCouldNotCompute(); 6390 const SCEV *MaxBECount = getCouldNotCompute(); 6391 if (EitherMayExit) { 6392 // Both conditions must be false for the loop to continue executing. 6393 // Choose the less conservative count. 6394 if (EL0.ExactNotTaken == getCouldNotCompute() || 6395 EL1.ExactNotTaken == getCouldNotCompute()) 6396 BECount = getCouldNotCompute(); 6397 else 6398 BECount = 6399 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6400 if (EL0.MaxNotTaken == getCouldNotCompute()) 6401 MaxBECount = EL1.MaxNotTaken; 6402 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6403 MaxBECount = EL0.MaxNotTaken; 6404 else 6405 MaxBECount = 6406 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6407 } else { 6408 // Both conditions must be false at the same time for the loop to exit. 6409 // For now, be conservative. 
6410 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 6411 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6412 MaxBECount = EL0.MaxNotTaken; 6413 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6414 BECount = EL0.ExactNotTaken; 6415 } 6416 6417 return ExitLimit(BECount, MaxBECount, false, 6418 {&EL0.Predicates, &EL1.Predicates}); 6419 } 6420 } 6421 6422 // With an icmp, it may be feasible to compute an exact backedge-taken count. 6423 // Proceed to the next level to examine the icmp. 6424 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 6425 ExitLimit EL = 6426 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 6427 if (EL.hasFullInfo() || !AllowPredicates) 6428 return EL; 6429 6430 // Try again, but use SCEV predicates this time. 6431 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 6432 /*AllowPredicates=*/true); 6433 } 6434 6435 // Check for a constant condition. These are normally stripped out by 6436 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 6437 // preserve the CFG and is temporarily leaving constant conditions 6438 // in place. 6439 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 6440 if (L->contains(FBB) == !CI->getZExtValue()) 6441 // The backedge is always taken. 6442 return getCouldNotCompute(); 6443 else 6444 // The backedge is never taken. 6445 return getZero(CI->getType()); 6446 } 6447 6448 // If it's not an integer or pointer comparison then compute it the hard way. 6449 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6450 } 6451 6452 ScalarEvolution::ExitLimit 6453 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 6454 ICmpInst *ExitCond, 6455 BasicBlock *TBB, 6456 BasicBlock *FBB, 6457 bool ControlsExit, 6458 bool AllowPredicates) { 6459 6460 // If the condition was exit on true, convert the condition to exit on false 6461 ICmpInst::Predicate Cond; 6462 if (!L->contains(FBB)) 6463 Cond = ExitCond->getPredicate(); 6464 else 6465 Cond = ExitCond->getInversePredicate(); 6466 6467 // Handle common loops like: for (X = "string"; *X; ++X) 6468 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 6469 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 6470 ExitLimit ItCnt = 6471 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 6472 if (ItCnt.hasAnyInfo()) 6473 return ItCnt; 6474 } 6475 6476 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 6477 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 6478 6479 // Try to evaluate any dependencies out of the loop. 6480 LHS = getSCEVAtScope(LHS, L); 6481 RHS = getSCEVAtScope(RHS, L); 6482 6483 // At this point, we would like to compute how many iterations of the 6484 // loop the predicate will return true for these inputs. 6485 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 6486 // If there is a loop-invariant, force it into the RHS. 6487 std::swap(LHS, RHS); 6488 Cond = ICmpInst::getSwappedPredicate(Cond); 6489 } 6490 6491 // Simplify the operands before analyzing them. 6492 (void)SimplifyICmpOperands(Cond, LHS, RHS); 6493 6494 // If we have a comparison of a chrec against a constant, try to use value 6495 // ranges to answer this query. 6496 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 6497 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 6498 if (AddRec->getLoop() == L) { 6499 // Form the constant range. 
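        // For example, with Cond == ICMP_ULT and a constant RHS of 8, the
        // exact region is [0, 8): precisely the values for which the
        // compare is true.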
6500 ConstantRange CompRange = 6501 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 6502 6503 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 6504 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 6505 } 6506 6507 switch (Cond) { 6508 case ICmpInst::ICMP_NE: { // while (X != Y) 6509 // Convert to: while (X-Y != 0) 6510 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 6511 AllowPredicates); 6512 if (EL.hasAnyInfo()) return EL; 6513 break; 6514 } 6515 case ICmpInst::ICMP_EQ: { // while (X == Y) 6516 // Convert to: while (X-Y == 0) 6517 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 6518 if (EL.hasAnyInfo()) return EL; 6519 break; 6520 } 6521 case ICmpInst::ICMP_SLT: 6522 case ICmpInst::ICMP_ULT: { // while (X < Y) 6523 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 6524 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 6525 AllowPredicates); 6526 if (EL.hasAnyInfo()) return EL; 6527 break; 6528 } 6529 case ICmpInst::ICMP_SGT: 6530 case ICmpInst::ICMP_UGT: { // while (X > Y) 6531 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 6532 ExitLimit EL = 6533 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 6534 AllowPredicates); 6535 if (EL.hasAnyInfo()) return EL; 6536 break; 6537 } 6538 default: 6539 break; 6540 } 6541 6542 auto *ExhaustiveCount = 6543 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6544 6545 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 6546 return ExhaustiveCount; 6547 6548 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 6549 ExitCond->getOperand(1), L, Cond); 6550 } 6551 6552 ScalarEvolution::ExitLimit 6553 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 6554 SwitchInst *Switch, 6555 BasicBlock *ExitingBlock, 6556 bool ControlsExit) { 6557 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 6558 6559 // Give up if the exit is the default dest of a switch. 6560 if (Switch->getDefaultDest() == ExitingBlock) 6561 return getCouldNotCompute(); 6562 6563 assert(L->contains(Switch->getDefaultDest()) && 6564 "Default case must not exit the loop!"); 6565 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 6566 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 6567 6568 // while (X != Y) --> while (X-Y != 0) 6569 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 6570 if (EL.hasAnyInfo()) 6571 return EL; 6572 6573 return getCouldNotCompute(); 6574 } 6575 6576 static ConstantInt * 6577 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 6578 ScalarEvolution &SE) { 6579 const SCEV *InVal = SE.getConstant(C); 6580 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 6581 assert(isa<SCEVConstant>(Val) && 6582 "Evaluation of SCEV at constant didn't fold correctly?"); 6583 return cast<SCEVConstant>(Val)->getValue(); 6584 } 6585 6586 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 6587 /// compute the backedge execution count. 6588 ScalarEvolution::ExitLimit 6589 ScalarEvolution::computeLoadConstantCompareExitLimit( 6590 LoadInst *LI, 6591 Constant *RHS, 6592 const Loop *L, 6593 ICmpInst::Predicate predicate) { 6594 6595 if (LI->isVolatile()) return getCouldNotCompute(); 6596 6597 // Check to see if the loaded pointer is a getelementptr of a global. 6598 // TODO: Use SCEV instead of manually grubbing with GEPs. 
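  // The idiom handled here is, for illustration,
  //
  //   static const int Table[] = {9, 7, 5, 3, 0};
  //   for (i = 0; Table[i] != 0; ++i) { ... }
  //
  // where the load of Table[i] feeds the exit compare. The code below folds
  // the load for i = 0, 1, 2, ... until the compare becomes false (i == 4
  // here), brute-forcing at most MaxBruteForceIterations iterations.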
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);  // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
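  // For example, "%t = lshr i32 %x, 3" matches with OutLHS == %x and
  // OutOpCode == Instruction::LShr; a shift by zero or by a non-constant
  // amount does not match.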
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
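    // For example, {-32,ashr,2} evaluates to -32, -8, -2, -1, -1, ... and is
    // stuck at signum(-32) == -1 after three iterations.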
6772 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 6773 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 6774 Predecessor->getTerminator(), &DT); 6775 auto *Ty = cast<IntegerType>(RHS->getType()); 6776 if (Known.isNonNegative()) 6777 StableValue = ConstantInt::get(Ty, 0); 6778 else if (Known.isNegative()) 6779 StableValue = ConstantInt::get(Ty, -1, true); 6780 else 6781 return getCouldNotCompute(); 6782 6783 break; 6784 } 6785 case Instruction::LShr: 6786 case Instruction::Shl: 6787 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 6788 // stabilize to 0 in at most bitwidth(K) iterations. 6789 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 6790 break; 6791 } 6792 6793 auto *Result = 6794 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 6795 assert(Result->getType()->isIntegerTy(1) && 6796 "Otherwise cannot be an operand to a branch instruction"); 6797 6798 if (Result->isZeroValue()) { 6799 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 6800 const SCEV *UpperBound = 6801 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 6802 return ExitLimit(getCouldNotCompute(), UpperBound, false); 6803 } 6804 6805 return getCouldNotCompute(); 6806 } 6807 6808 /// Return true if we can constant fold an instruction of the specified type, 6809 /// assuming that all operands were constants. 6810 static bool CanConstantFold(const Instruction *I) { 6811 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 6812 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 6813 isa<LoadInst>(I)) 6814 return true; 6815 6816 if (const CallInst *CI = dyn_cast<CallInst>(I)) 6817 if (const Function *F = CI->getCalledFunction()) 6818 return canConstantFoldCallTo(CI, F); 6819 return false; 6820 } 6821 6822 /// Determine whether this instruction can constant evolve within this loop 6823 /// assuming its operands can all constant evolve. 6824 static bool canConstantEvolve(Instruction *I, const Loop *L) { 6825 // An instruction outside of the loop can't be derived from a loop PHI. 6826 if (!L->contains(I)) return false; 6827 6828 if (isa<PHINode>(I)) { 6829 // We don't currently keep track of the control flow needed to evaluate 6830 // PHIs, so we cannot handle PHIs inside of loops. 6831 return L->getHeader() == I->getParent(); 6832 } 6833 6834 // If we won't be able to constant fold this expression even if the operands 6835 // are constants, bail early. 6836 return CanConstantFold(I); 6837 } 6838 6839 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 6840 /// recursing through each instruction operand until reaching a loop header phi. 6841 static PHINode * 6842 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 6843 DenseMap<Instruction *, PHINode *> &PHIMap, 6844 unsigned Depth) { 6845 if (Depth > MaxConstantEvolvingDepth) 6846 return nullptr; 6847 6848 // Otherwise, we can evaluate this instruction if all of its operands are 6849 // constant or derived from a PHI node themselves. 6850 PHINode *PHI = nullptr; 6851 for (Value *Op : UseInst->operands()) { 6852 if (isa<Constant>(Op)) continue; 6853 6854 Instruction *OpInst = dyn_cast<Instruction>(Op); 6855 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 6856 6857 PHINode *P = dyn_cast<PHINode>(OpInst); 6858 if (!P) 6859 // If this operand is already visited, reuse the prior result. 6860 // We may have P != PHI if this is the deepest point at which the 6861 // inconsistent paths meet. 
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
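// For example, given "%p = phi i32 [ %p.next, %latch ], [ 7, %a ], [ 7, %b ]"
// and BB == %latch, this returns the ConstantInt 7; if the two constants
// disagreed, or either value were non-constant, it would return nullptr.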
6948 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 6949 Constant *IncomingVal = nullptr; 6950 6951 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 6952 if (PN->getIncomingBlock(i) == BB) 6953 continue; 6954 6955 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 6956 if (!CurrentVal) 6957 return nullptr; 6958 6959 if (IncomingVal != CurrentVal) { 6960 if (IncomingVal) 6961 return nullptr; 6962 IncomingVal = CurrentVal; 6963 } 6964 } 6965 6966 return IncomingVal; 6967 } 6968 6969 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 6970 /// in the header of its containing loop, we know the loop executes a 6971 /// constant number of times, and the PHI node is just a recurrence 6972 /// involving constants, fold it. 6973 Constant * 6974 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 6975 const APInt &BEs, 6976 const Loop *L) { 6977 auto I = ConstantEvolutionLoopExitValue.find(PN); 6978 if (I != ConstantEvolutionLoopExitValue.end()) 6979 return I->second; 6980 6981 if (BEs.ugt(MaxBruteForceIterations)) 6982 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 6983 6984 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 6985 6986 DenseMap<Instruction *, Constant *> CurrentIterVals; 6987 BasicBlock *Header = L->getHeader(); 6988 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6989 6990 BasicBlock *Latch = L->getLoopLatch(); 6991 if (!Latch) 6992 return nullptr; 6993 6994 for (auto &I : *Header) { 6995 PHINode *PHI = dyn_cast<PHINode>(&I); 6996 if (!PHI) break; 6997 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6998 if (!StartCST) continue; 6999 CurrentIterVals[PHI] = StartCST; 7000 } 7001 if (!CurrentIterVals.count(PN)) 7002 return RetVal = nullptr; 7003 7004 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7005 7006 // Execute the loop symbolically to determine the exit value. 7007 if (BEs.getActiveBits() >= 32) 7008 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! 7009 7010 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7011 unsigned IterationNum = 0; 7012 const DataLayout &DL = getDataLayout(); 7013 for (; ; ++IterationNum) { 7014 if (IterationNum == NumIterations) 7015 return RetVal = CurrentIterVals[PN]; // Got exit value! 7016 7017 // Compute the value of the PHIs for the next iteration. 7018 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7019 DenseMap<Instruction *, Constant *> NextIterVals; 7020 Constant *NextPHI = 7021 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7022 if (!NextPHI) 7023 return nullptr; // Couldn't evaluate! 7024 NextIterVals[PN] = NextPHI; 7025 7026 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7027 7028 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7029 // cease to be able to evaluate one of them or if they stop evolving, 7030 // because that doesn't necessarily prevent us from computing PN. 7031 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7032 for (const auto &I : CurrentIterVals) { 7033 PHINode *PHI = dyn_cast<PHINode>(I.first); 7034 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7035 PHIsToCompute.emplace_back(PHI, I.second); 7036 } 7037 // We use two distinct loops because EvaluateExpression may invalidate any 7038 // iterators into CurrentIterVals. 
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {  // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;  // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;  // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
7127 return getCouldNotCompute(); 7128 } 7129 7130 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7131 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7132 ValuesAtScopes[V]; 7133 // Check to see if we've folded this expression at this loop before. 7134 for (auto &LS : Values) 7135 if (LS.first == L) 7136 return LS.second ? LS.second : V; 7137 7138 Values.emplace_back(L, nullptr); 7139 7140 // Otherwise compute it. 7141 const SCEV *C = computeSCEVAtScope(V, L); 7142 for (auto &LS : reverse(ValuesAtScopes[V])) 7143 if (LS.first == L) { 7144 LS.second = C; 7145 break; 7146 } 7147 return C; 7148 } 7149 7150 /// This builds up a Constant using the ConstantExpr interface. That way, we 7151 /// will return Constants for objects which aren't represented by a 7152 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7153 /// Returns NULL if the SCEV isn't representable as a Constant. 7154 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7155 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7156 case scCouldNotCompute: 7157 case scAddRecExpr: 7158 break; 7159 case scConstant: 7160 return cast<SCEVConstant>(V)->getValue(); 7161 case scUnknown: 7162 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7163 case scSignExtend: { 7164 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7165 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7166 return ConstantExpr::getSExt(CastOp, SS->getType()); 7167 break; 7168 } 7169 case scZeroExtend: { 7170 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7171 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7172 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7173 break; 7174 } 7175 case scTruncate: { 7176 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7177 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7178 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7179 break; 7180 } 7181 case scAddExpr: { 7182 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7183 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7184 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7185 unsigned AS = PTy->getAddressSpace(); 7186 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7187 C = ConstantExpr::getBitCast(C, DestPtrTy); 7188 } 7189 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7190 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7191 if (!C2) return nullptr; 7192 7193 // First pointer! 7194 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7195 unsigned AS = C2->getType()->getPointerAddressSpace(); 7196 std::swap(C, C2); 7197 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7198 // The offsets have been converted to bytes. We can add bytes to an 7199 // i8* by GEP with the byte count in the first index. 7200 C = ConstantExpr::getBitCast(C, DestPtrTy); 7201 } 7202 7203 // Don't bother trying to sum two pointers. We probably can't 7204 // statically compute a load that results from it anyway. 
7205 if (C2->getType()->isPointerTy()) 7206 return nullptr; 7207 7208 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7209 if (PTy->getElementType()->isStructTy()) 7210 C2 = ConstantExpr::getIntegerCast( 7211 C2, Type::getInt32Ty(C->getContext()), true); 7212 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 7213 } else 7214 C = ConstantExpr::getAdd(C, C2); 7215 } 7216 return C; 7217 } 7218 break; 7219 } 7220 case scMulExpr: { 7221 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 7222 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 7223 // Don't bother with pointers at all. 7224 if (C->getType()->isPointerTy()) return nullptr; 7225 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 7226 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 7227 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 7228 C = ConstantExpr::getMul(C, C2); 7229 } 7230 return C; 7231 } 7232 break; 7233 } 7234 case scUDivExpr: { 7235 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 7236 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 7237 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 7238 if (LHS->getType() == RHS->getType()) 7239 return ConstantExpr::getUDiv(LHS, RHS); 7240 break; 7241 } 7242 case scSMaxExpr: 7243 case scUMaxExpr: 7244 break; // TODO: smax, umax. 7245 } 7246 return nullptr; 7247 } 7248 7249 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 7250 if (isa<SCEVConstant>(V)) return V; 7251 7252 // If this instruction is evolved from a constant-evolving PHI, compute the 7253 // exit value from the loop without using SCEVs. 7254 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 7255 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 7256 const Loop *LI = this->LI[I->getParent()]; 7257 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 7258 if (PHINode *PN = dyn_cast<PHINode>(I)) 7259 if (PN->getParent() == LI->getHeader()) { 7260 // Okay, there is no closed form solution for the PHI node. Check 7261 // to see if the loop that contains it has a known backedge-taken 7262 // count. If so, we may be able to force computation of the exit 7263 // value. 7264 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 7265 if (const SCEVConstant *BTCC = 7266 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 7267 // Okay, we know how many times the containing loop executes. If 7268 // this is a constant evolving PHI node, get the final value at 7269 // the specified iteration number. 7270 Constant *RV = 7271 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 7272 if (RV) return getSCEV(RV); 7273 } 7274 } 7275 7276 // Okay, this is an expression that we cannot symbolically evaluate 7277 // into a SCEV. Check to see if it's possible to symbolically evaluate 7278 // the arguments into constants, and if so, try to constant propagate the 7279 // result. This is particularly useful for computing loop exit values. 7280 if (CanConstantFold(I)) { 7281 SmallVector<Constant *, 4> Operands; 7282 bool MadeImprovement = false; 7283 for (Value *Op : I->operands()) { 7284 if (Constant *C = dyn_cast<Constant>(Op)) { 7285 Operands.push_back(C); 7286 continue; 7287 } 7288 7289 // If any of the operands is non-constant and if they are 7290 // non-integer and non-pointer, don't even try to analyze them 7291 // with scev techniques. 
7292 if (!isSCEVable(Op->getType())) 7293 return V; 7294 7295 const SCEV *OrigV = getSCEV(Op); 7296 const SCEV *OpV = getSCEVAtScope(OrigV, L); 7297 MadeImprovement |= OrigV != OpV; 7298 7299 Constant *C = BuildConstantFromSCEV(OpV); 7300 if (!C) return V; 7301 if (C->getType() != Op->getType()) 7302 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 7303 Op->getType(), 7304 false), 7305 C, Op->getType()); 7306 Operands.push_back(C); 7307 } 7308 7309 // Check to see if getSCEVAtScope actually made an improvement. 7310 if (MadeImprovement) { 7311 Constant *C = nullptr; 7312 const DataLayout &DL = getDataLayout(); 7313 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 7314 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7315 Operands[1], DL, &TLI); 7316 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 7317 if (!LI->isVolatile()) 7318 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7319 } else 7320 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 7321 if (!C) return V; 7322 return getSCEV(C); 7323 } 7324 } 7325 } 7326 7327 // This is some other type of SCEVUnknown, just return it. 7328 return V; 7329 } 7330 7331 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 7332 // Avoid performing the look-up in the common case where the specified 7333 // expression has no loop-variant portions. 7334 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 7335 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7336 if (OpAtScope != Comm->getOperand(i)) { 7337 // Okay, at least one of these operands is loop variant but might be 7338 // foldable. Build a new instance of the folded commutative expression. 7339 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 7340 Comm->op_begin()+i); 7341 NewOps.push_back(OpAtScope); 7342 7343 for (++i; i != e; ++i) { 7344 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7345 NewOps.push_back(OpAtScope); 7346 } 7347 if (isa<SCEVAddExpr>(Comm)) 7348 return getAddExpr(NewOps); 7349 if (isa<SCEVMulExpr>(Comm)) 7350 return getMulExpr(NewOps); 7351 if (isa<SCEVSMaxExpr>(Comm)) 7352 return getSMaxExpr(NewOps); 7353 if (isa<SCEVUMaxExpr>(Comm)) 7354 return getUMaxExpr(NewOps); 7355 llvm_unreachable("Unknown commutative SCEV type!"); 7356 } 7357 } 7358 // If we got here, all operands are loop invariant. 7359 return Comm; 7360 } 7361 7362 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 7363 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 7364 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 7365 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 7366 return Div; // must be loop invariant 7367 return getUDivExpr(LHS, RHS); 7368 } 7369 7370 // If this is a loop recurrence for a loop that does not contain L, then we 7371 // are dealing with the final value computed by the loop. 7372 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 7373 // First, attempt to evaluate each operand. 7374 // Avoid performing the look-up in the common case where the specified 7375 // expression has no loop-variant portions. 7376 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 7377 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 7378 if (OpAtScope == AddRec->getOperand(i)) 7379 continue; 7380 7381 // Okay, at least one of these operands is loop variant but might be 7382 // foldable. Build a new instance of the folded commutative expression. 
7383 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 7384 AddRec->op_begin()+i); 7385 NewOps.push_back(OpAtScope); 7386 for (++i; i != e; ++i) 7387 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 7388 7389 const SCEV *FoldedRec = 7390 getAddRecExpr(NewOps, AddRec->getLoop(), 7391 AddRec->getNoWrapFlags(SCEV::FlagNW)); 7392 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 7393 // The addrec may be folded to a nonrecurrence, for example, if the 7394 // induction variable is multiplied by zero after constant folding. Go 7395 // ahead and return the folded value. 7396 if (!AddRec) 7397 return FoldedRec; 7398 break; 7399 } 7400 7401 // If the scope is outside the addrec's loop, evaluate it by using the 7402 // loop exit value of the addrec. 7403 if (!AddRec->getLoop()->contains(L)) { 7404 // To evaluate this recurrence, we need to know how many times the AddRec 7405 // loop iterates. Compute this now. 7406 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 7407 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 7408 7409 // Then, evaluate the AddRec. 7410 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 7411 } 7412 7413 return AddRec; 7414 } 7415 7416 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 7417 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7418 if (Op == Cast->getOperand()) 7419 return Cast; // must be loop invariant 7420 return getZeroExtendExpr(Op, Cast->getType()); 7421 } 7422 7423 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 7424 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7425 if (Op == Cast->getOperand()) 7426 return Cast; // must be loop invariant 7427 return getSignExtendExpr(Op, Cast->getType()); 7428 } 7429 7430 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 7431 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 7432 if (Op == Cast->getOperand()) 7433 return Cast; // must be loop invariant 7434 return getTruncateExpr(Op, Cast->getType()); 7435 } 7436 7437 llvm_unreachable("Unknown SCEV type!"); 7438 } 7439 7440 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 7441 return getSCEVAtScope(getSCEV(V), L); 7442 } 7443 7444 /// Finds the minimum unsigned root of the following equation: 7445 /// 7446 /// A * X = B (mod N) 7447 /// 7448 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 7449 /// A and B isn't important. 7450 /// 7451 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 7452 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 7453 ScalarEvolution &SE) { 7454 uint32_t BW = A.getBitWidth(); 7455 assert(BW == SE.getTypeSizeInBits(B->getType())); 7456 assert(A != 0 && "A must be non-zero."); 7457 7458 // 1. D = gcd(A, N) 7459 // 7460 // The gcd of A and N may have only one prime factor: 2. The number of 7461 // trailing zeros in A is its multiplicity 7462 uint32_t Mult2 = A.countTrailingZeros(); 7463 // D = 2^Mult2 7464 7465 // 2. Check if B is divisible by D. 7466 // 7467 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 7468 // is not less than multiplicity of this prime factor for D. 7469 if (SE.GetMinTrailingZeros(B) < Mult2) 7470 return SE.getCouldNotCompute(); 7471 7472 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 7473 // modulo (N / D). 
7474   //
7475   // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
7476   // (N / D) in general. The inverse itself always fits into BW bits, though,
7477   // so we immediately truncate it.
7478   APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
7479   APInt Mod(BW + 1, 0);
7480   Mod.setBit(BW - Mult2);  // Mod = N / D
7481   APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
7482 
7483   // 4. Compute the minimum unsigned root of the equation:
7484   // I * (B / D) mod (N / D)
7485   // To simplify the computation, we factor out the divide by D:
7486   // (I * B mod N) / D
7487   const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
7488   return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
7489 }
7490 
7491 /// Find the roots of the quadratic equation for the given quadratic chrec
7492 /// {L,+,M,+,N}. This returns either the two roots (which might be the same),
7493 /// or None if no constant roots could be computed.
7494 ///
7495 static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
7496 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
7497   assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
7498   const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
7499   const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
7500   const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
7501 
7502   // We currently can only solve this if the coefficients are constants.
7503   if (!LC || !MC || !NC)
7504     return None;
7505 
7506   uint32_t BitWidth = LC->getAPInt().getBitWidth();
7507   const APInt &L = LC->getAPInt();
7508   const APInt &M = MC->getAPInt();
7509   const APInt &N = NC->getAPInt();
7510   APInt Two(BitWidth, 2);
7511 
7512   // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
7513 
7514   // The A coefficient is N/2.
7515   APInt A = N.sdiv(Two);
7516 
7517   // The B coefficient is M-N/2.
7518   APInt B = M;
7519   B -= A;  // A is the same as N/2.
7520 
7521   // The C coefficient is L.
7522   const APInt& C = L;
7523 
7524   // Compute the B^2-4ac term.
7525   APInt SqrtTerm = B;
7526   SqrtTerm *= B;
7527   SqrtTerm -= 4 * (A * C);
7528 
7529   if (SqrtTerm.isNegative()) {
7530     // The loop is provably infinite.
7531     return None;
7532   }
7533 
7534   // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
7535   // integer value or else APInt::sqrt() will assert.
7536   APInt SqrtVal = SqrtTerm.sqrt();
7537 
7538   // Compute the two solutions for the quadratic formula.
7539   // The divisions must be performed as signed divisions.
7540   APInt NegB = -std::move(B);
7541   APInt TwoA = std::move(A);
7542   TwoA <<= 1;
7543   if (TwoA.isNullValue())
7544     return None;
7545 
7546   LLVMContext &Context = SE.getContext();
7547 
7548   ConstantInt *Solution1 =
7549       ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
7550   ConstantInt *Solution2 =
7551       ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
7552 
7553   return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
7554                         cast<SCEVConstant>(SE.getConstant(Solution2)));
7555 }
7556 
7557 ScalarEvolution::ExitLimit
7558 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
7559                               bool AllowPredicates) {
7560 
7561   // This is only used for loops with an "x != y" exit test. The exit condition
7562   // is now expressed as a single expression, V = x-y. So the exit test is
7563   // effectively V != 0. We know and take advantage of the fact that this
7564   // expression is only ever used in a compare-with-zero context.
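  // As an illustration (the loop here is hypothetical, added as a sketch):
  // for `for (i = 0; i != n; i += 2)`, the exit test i != n is analyzed as
  // V = {0,+,2} - n = {-n,+,2}, and the affine case below reduces to finding
  // the minimum unsigned X with 2*X = n (mod 2^BW), which
  // SolveLinEquationWithOverflow answers.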
7565 7566 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 7567 // If the value is a constant 7568 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7569 // If the value is already zero, the branch will execute zero times. 7570 if (C->getValue()->isZero()) return C; 7571 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7572 } 7573 7574 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 7575 if (!AddRec && AllowPredicates) 7576 // Try to make this an AddRec using runtime tests, in the first X 7577 // iterations of this loop, where X is the SCEV expression found by the 7578 // algorithm below. 7579 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 7580 7581 if (!AddRec || AddRec->getLoop() != L) 7582 return getCouldNotCompute(); 7583 7584 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 7585 // the quadratic equation to solve it. 7586 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 7587 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) { 7588 const SCEVConstant *R1 = Roots->first; 7589 const SCEVConstant *R2 = Roots->second; 7590 // Pick the smallest positive root value. 7591 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 7592 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 7593 if (!CB->getZExtValue()) 7594 std::swap(R1, R2); // R1 is the minimum root now. 7595 7596 // We can only use this value if the chrec ends up with an exact zero 7597 // value at this index. When solving for "X*X != 5", for example, we 7598 // should not accept a root of 2. 7599 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 7600 if (Val->isZero()) 7601 // We found a quadratic root! 7602 return ExitLimit(R1, R1, false, Predicates); 7603 } 7604 } 7605 return getCouldNotCompute(); 7606 } 7607 7608 // Otherwise we can only handle this if it is affine. 7609 if (!AddRec->isAffine()) 7610 return getCouldNotCompute(); 7611 7612 // If this is an affine expression, the execution count of this branch is 7613 // the minimum unsigned root of the following equation: 7614 // 7615 // Start + Step*N = 0 (mod 2^BW) 7616 // 7617 // equivalent to: 7618 // 7619 // Step*N = -Start (mod 2^BW) 7620 // 7621 // where BW is the common bit width of Start and Step. 7622 7623 // Get the initial value for the loop. 7624 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 7625 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 7626 7627 // For now we handle only constant steps. 7628 // 7629 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 7630 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 7631 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 7632 // We have not yet seen any such cases. 7633 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 7634 if (!StepC || StepC->getValue()->equalsInt(0)) 7635 return getCouldNotCompute(); 7636 7637 // For positive steps (counting up until unsigned overflow): 7638 // N = -Start/Step (as unsigned) 7639 // For negative steps (counting down to zero): 7640 // N = Start/-Step 7641 // First compute the unsigned distance from zero in the direction of Step. 7642 bool CountDown = StepC->getAPInt().isNegative(); 7643 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 7644 7645 // Handle unitary steps, which cannot wraparound. 
7646 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 7647 // N = Distance (as unsigned) 7648 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) { 7649 APInt MaxBECount = getUnsignedRangeMax(Distance); 7650 7651 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 7652 // we end up with a loop whose backedge-taken count is n - 1. Detect this 7653 // case, and see if we can improve the bound. 7654 // 7655 // Explicitly handling this here is necessary because getUnsignedRange 7656 // isn't context-sensitive; it doesn't know that we only care about the 7657 // range inside the loop. 7658 const SCEV *Zero = getZero(Distance->getType()); 7659 const SCEV *One = getOne(Distance->getType()); 7660 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 7661 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 7662 // If Distance + 1 doesn't overflow, we can compute the maximum distance 7663 // as "unsigned_max(Distance + 1) - 1". 7664 ConstantRange CR = getUnsignedRange(DistancePlusOne); 7665 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 7666 } 7667 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 7668 } 7669 7670 // If the condition controls loop exit (the loop exits only if the expression 7671 // is true) and the addition is no-wrap we can use unsigned divide to 7672 // compute the backedge count. In this case, the step may not divide the 7673 // distance, but we don't care because if the condition is "missed" the loop 7674 // will have undefined behavior due to wrapping. 7675 if (ControlsExit && AddRec->hasNoSelfWrap() && 7676 loopHasNoAbnormalExits(AddRec->getLoop())) { 7677 const SCEV *Exact = 7678 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 7679 const SCEV *Max = 7680 Exact == getCouldNotCompute() 7681 ? Exact 7682 : getConstant(getUnsignedRangeMax(Exact)); 7683 return ExitLimit(Exact, Max, false, Predicates); 7684 } 7685 7686 // Solve the general equation. 7687 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 7688 getNegativeSCEV(Start), *this); 7689 const SCEV *M = E == getCouldNotCompute() 7690 ? E 7691 : getConstant(getUnsignedRangeMax(E)); 7692 return ExitLimit(E, M, false, Predicates); 7693 } 7694 7695 ScalarEvolution::ExitLimit 7696 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 7697 // Loops that look like: while (X == 0) are very strange indeed. We don't 7698 // handle them yet except for the trivial case. This could be expanded in the 7699 // future as needed. 7700 7701 // If the value is a constant, check to see if it is known to be non-zero 7702 // already. If so, the backedge will execute zero times. 7703 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7704 if (!C->getValue()->isNullValue()) 7705 return getZero(C->getType()); 7706 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7707 } 7708 7709 // We could implement others, but I really doubt anyone writes loops like 7710 // this, and if they did, they would already be constant folded. 7711 return getCouldNotCompute(); 7712 } 7713 7714 std::pair<BasicBlock *, BasicBlock *> 7715 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 7716 // If the block has a unique predecessor, then there is no path from the 7717 // predecessor to the block that does not go through the direct edge 7718 // from the predecessor to the block. 
7719   if (BasicBlock *Pred = BB->getSinglePredecessor())
7720     return {Pred, BB};
7721 
7722   // A loop's header is defined to be a block that dominates the loop.
7723   // If the header has a unique predecessor outside the loop, it must be
7724   // a block that has exactly one successor that can reach the loop.
7725   if (Loop *L = LI.getLoopFor(BB))
7726     return {L->getLoopPredecessor(), L->getHeader()};
7727 
7728   return {nullptr, nullptr};
7729 }
7730 
7731 /// SCEV structural equivalence is usually sufficient for testing whether two
7732 /// expressions are equal; however, for the purposes of looking for a condition
7733 /// guarding a loop, it can be useful to be a little more general, since a
7734 /// front-end may have replicated the controlling expression.
7735 ///
7736 static bool HasSameValue(const SCEV *A, const SCEV *B) {
7737   // Quick check to see if they are the same SCEV.
7738   if (A == B) return true;
7739 
7740   auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
7741     // Not all instructions that are "identical" compute the same value. For
7742     // instance, two distinct alloca instructions allocating the same type are
7743     // identical and do not read memory, but compute distinct values.
7744     return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
7745   };
7746 
7747   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
7748   // two different instructions with the same value. Check for this case.
7749   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
7750     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
7751       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
7752         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
7753           if (ComputesEqualValues(AI, BI))
7754             return true;
7755 
7756   // Otherwise assume they may have a different value.
7757   return false;
7758 }
7759 
7760 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
7761                                            const SCEV *&LHS, const SCEV *&RHS,
7762                                            unsigned Depth) {
7763   bool Changed = false;
7764 
7765   // If we hit the max recursion limit, bail out.
7766   if (Depth >= 3)
7767     return false;
7768 
7769   // Canonicalize a constant to the right side.
7770   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
7771     // Check for both operands constant.
7772     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
7773       if (ConstantExpr::getICmp(Pred,
7774                                 LHSC->getValue(),
7775                                 RHSC->getValue())->isNullValue())
7776         goto trivially_false;
7777       else
7778         goto trivially_true;
7779     }
7780     // Otherwise swap the operands to put the constant on the right.
7781     std::swap(LHS, RHS);
7782     Pred = ICmpInst::getSwappedPredicate(Pred);
7783     Changed = true;
7784   }
7785 
7786   // If we're comparing an addrec with a value which is loop-invariant in the
7787   // addrec's loop, put the addrec on the left. Also make a dominance check,
7788   // as both operands could be addrecs loop-invariant in each other's loop.
7789   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
7790     const Loop *L = AR->getLoop();
7791     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
7792       std::swap(LHS, RHS);
7793       Pred = ICmpInst::getSwappedPredicate(Pred);
7794       Changed = true;
7795     }
7796   }
7797 
7798   // If there's a constant operand, canonicalize comparisons with boundary
7799   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
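  // For example: with RA = 1, the exact region for `X u< 1` is [0, 1), so the
  // inequality collapses to the equality `X == 0`; and `X u<= 5` is rewritten
  // below to the strict form `X u< 6`. The asserts further down check that the
  // boundary cases were already folded away as trivially true or false.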
7800 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 7801 const APInt &RA = RC->getAPInt(); 7802 7803 bool SimplifiedByConstantRange = false; 7804 7805 if (!ICmpInst::isEquality(Pred)) { 7806 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 7807 if (ExactCR.isFullSet()) 7808 goto trivially_true; 7809 else if (ExactCR.isEmptySet()) 7810 goto trivially_false; 7811 7812 APInt NewRHS; 7813 CmpInst::Predicate NewPred; 7814 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 7815 ICmpInst::isEquality(NewPred)) { 7816 // We were able to convert an inequality to an equality. 7817 Pred = NewPred; 7818 RHS = getConstant(NewRHS); 7819 Changed = SimplifiedByConstantRange = true; 7820 } 7821 } 7822 7823 if (!SimplifiedByConstantRange) { 7824 switch (Pred) { 7825 default: 7826 break; 7827 case ICmpInst::ICMP_EQ: 7828 case ICmpInst::ICMP_NE: 7829 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 7830 if (!RA) 7831 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 7832 if (const SCEVMulExpr *ME = 7833 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 7834 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 7835 ME->getOperand(0)->isAllOnesValue()) { 7836 RHS = AE->getOperand(1); 7837 LHS = ME->getOperand(1); 7838 Changed = true; 7839 } 7840 break; 7841 7842 7843 // The "Should have been caught earlier!" messages refer to the fact 7844 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 7845 // should have fired on the corresponding cases, and canonicalized the 7846 // check to trivially_true or trivially_false. 7847 7848 case ICmpInst::ICMP_UGE: 7849 assert(!RA.isMinValue() && "Should have been caught earlier!"); 7850 Pred = ICmpInst::ICMP_UGT; 7851 RHS = getConstant(RA - 1); 7852 Changed = true; 7853 break; 7854 case ICmpInst::ICMP_ULE: 7855 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 7856 Pred = ICmpInst::ICMP_ULT; 7857 RHS = getConstant(RA + 1); 7858 Changed = true; 7859 break; 7860 case ICmpInst::ICMP_SGE: 7861 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 7862 Pred = ICmpInst::ICMP_SGT; 7863 RHS = getConstant(RA - 1); 7864 Changed = true; 7865 break; 7866 case ICmpInst::ICMP_SLE: 7867 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 7868 Pred = ICmpInst::ICMP_SLT; 7869 RHS = getConstant(RA + 1); 7870 Changed = true; 7871 break; 7872 } 7873 } 7874 } 7875 7876 // Check for obvious equality. 7877 if (HasSameValue(LHS, RHS)) { 7878 if (ICmpInst::isTrueWhenEqual(Pred)) 7879 goto trivially_true; 7880 if (ICmpInst::isFalseWhenEqual(Pred)) 7881 goto trivially_false; 7882 } 7883 7884 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 7885 // adding or subtracting 1 from one of the operands. 
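  // For instance: `X s<= Y` becomes `X s< Y+1` when Y+1 provably cannot
  // sign-overflow, and otherwise `X-1 s< Y` when X-1 cannot underflow; the
  // unsigned cases below are the same rewrite with unsigned range checks.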
7886 switch (Pred) { 7887 case ICmpInst::ICMP_SLE: 7888 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 7889 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7890 SCEV::FlagNSW); 7891 Pred = ICmpInst::ICMP_SLT; 7892 Changed = true; 7893 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 7894 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 7895 SCEV::FlagNSW); 7896 Pred = ICmpInst::ICMP_SLT; 7897 Changed = true; 7898 } 7899 break; 7900 case ICmpInst::ICMP_SGE: 7901 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 7902 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 7903 SCEV::FlagNSW); 7904 Pred = ICmpInst::ICMP_SGT; 7905 Changed = true; 7906 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 7907 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7908 SCEV::FlagNSW); 7909 Pred = ICmpInst::ICMP_SGT; 7910 Changed = true; 7911 } 7912 break; 7913 case ICmpInst::ICMP_ULE: 7914 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 7915 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7916 SCEV::FlagNUW); 7917 Pred = ICmpInst::ICMP_ULT; 7918 Changed = true; 7919 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 7920 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 7921 Pred = ICmpInst::ICMP_ULT; 7922 Changed = true; 7923 } 7924 break; 7925 case ICmpInst::ICMP_UGE: 7926 if (!getUnsignedRangeMin(RHS).isMinValue()) { 7927 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 7928 Pred = ICmpInst::ICMP_UGT; 7929 Changed = true; 7930 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 7931 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7932 SCEV::FlagNUW); 7933 Pred = ICmpInst::ICMP_UGT; 7934 Changed = true; 7935 } 7936 break; 7937 default: 7938 break; 7939 } 7940 7941 // TODO: More simplifications are possible here. 7942 7943 // Recursively simplify until we either hit a recursion limit or nothing 7944 // changes. 7945 if (Changed) 7946 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 7947 7948 return Changed; 7949 7950 trivially_true: 7951 // Return 0 == 0. 7952 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7953 Pred = ICmpInst::ICMP_EQ; 7954 return true; 7955 7956 trivially_false: 7957 // Return 0 != 0. 7958 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7959 Pred = ICmpInst::ICMP_NE; 7960 return true; 7961 } 7962 7963 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 7964 return getSignedRangeMax(S).isNegative(); 7965 } 7966 7967 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 7968 return getSignedRangeMin(S).isStrictlyPositive(); 7969 } 7970 7971 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 7972 return !getSignedRangeMin(S).isNegative(); 7973 } 7974 7975 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 7976 return !getSignedRangeMax(S).isStrictlyPositive(); 7977 } 7978 7979 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 7980 return isKnownNegative(S) || isKnownPositive(S); 7981 } 7982 7983 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 7984 const SCEV *LHS, const SCEV *RHS) { 7985 // Canonicalize the inputs first. 7986 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7987 7988 // If LHS or RHS is an addrec, check to see if the condition is true in 7989 // every iteration of the loop. 7990 // If LHS and RHS are both addrec, both conditions must be true in 7991 // every iteration of the loop. 
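  // Sketch of the idea: for an addrec LHS, "LHS `Pred` RHS holds on entry"
  // (checked against the start value) together with "the backedge condition
  // implies it for the post-increment value" covers every iteration. E.g.,
  // {0,+,1} s< N follows from 0 s< N at loop entry plus {1,+,1} s< N under
  // the backedge guard.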
7992   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
7993   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
7994   bool LeftGuarded = false;
7995   bool RightGuarded = false;
7996   if (LAR) {
7997     const Loop *L = LAR->getLoop();
7998     if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
7999         isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
8000       if (!RAR) return true;
8001       LeftGuarded = true;
8002     }
8003   }
8004   if (RAR) {
8005     const Loop *L = RAR->getLoop();
8006     if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
8007         isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
8008       if (!LAR) return true;
8009       RightGuarded = true;
8010     }
8011   }
8012   if (LeftGuarded && RightGuarded)
8013     return true;
8014 
8015   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
8016     return true;
8017 
8018   // Otherwise see what can be done with known constant ranges.
8019   return isKnownPredicateViaConstantRanges(Pred, LHS, RHS);
8020 }
8021 
8022 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
8023                                            ICmpInst::Predicate Pred,
8024                                            bool &Increasing) {
8025   bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);
8026 
8027 #ifndef NDEBUG
8028   // Verify an invariant: swapping the predicate should turn a monotonically
8029   // increasing change into a monotonically decreasing one, and vice versa.
8030   bool IncreasingSwapped;
8031   bool ResultSwapped = isMonotonicPredicateImpl(
8032       LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);
8033 
8034   assert(Result == ResultSwapped && "should be able to analyze both!");
8035   if (ResultSwapped)
8036     assert(Increasing == !IncreasingSwapped &&
8037            "monotonicity should flip as we flip the predicate");
8038 #endif
8039 
8040   return Result;
8041 }
8042 
8043 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
8044                                                ICmpInst::Predicate Pred,
8045                                                bool &Increasing) {
8046 
8047   // A zero step value for LHS means the induction variable is essentially a
8048   // loop invariant value. We don't really depend on the predicate actually
8049   // flipping from false to true (for increasing predicates, and the other way
8050   // around for decreasing predicates); all we care about is that *if* the
8051   // predicate changes then it only changes from false to true.
8052   //
8053   // A zero step value in itself is not very useful, but there may be places
8054   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
8055   // as general as possible.
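  // Example: for an <nuw> addrec IV, "IV u>= N" can only change from false to
  // true as the loop runs (Increasing), while "IV u< N" can only change from
  // true to false; the switch below derives exactly this from the no-wrap
  // flags and, for the signed cases, from the sign of the step.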
8056 8057 switch (Pred) { 8058 default: 8059 return false; // Conservative answer 8060 8061 case ICmpInst::ICMP_UGT: 8062 case ICmpInst::ICMP_UGE: 8063 case ICmpInst::ICMP_ULT: 8064 case ICmpInst::ICMP_ULE: 8065 if (!LHS->hasNoUnsignedWrap()) 8066 return false; 8067 8068 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8069 return true; 8070 8071 case ICmpInst::ICMP_SGT: 8072 case ICmpInst::ICMP_SGE: 8073 case ICmpInst::ICMP_SLT: 8074 case ICmpInst::ICMP_SLE: { 8075 if (!LHS->hasNoSignedWrap()) 8076 return false; 8077 8078 const SCEV *Step = LHS->getStepRecurrence(*this); 8079 8080 if (isKnownNonNegative(Step)) { 8081 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8082 return true; 8083 } 8084 8085 if (isKnownNonPositive(Step)) { 8086 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8087 return true; 8088 } 8089 8090 return false; 8091 } 8092 8093 } 8094 8095 llvm_unreachable("switch has default clause!"); 8096 } 8097 8098 bool ScalarEvolution::isLoopInvariantPredicate( 8099 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8100 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8101 const SCEV *&InvariantRHS) { 8102 8103 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 8104 if (!isLoopInvariant(RHS, L)) { 8105 if (!isLoopInvariant(LHS, L)) 8106 return false; 8107 8108 std::swap(LHS, RHS); 8109 Pred = ICmpInst::getSwappedPredicate(Pred); 8110 } 8111 8112 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8113 if (!ArLHS || ArLHS->getLoop() != L) 8114 return false; 8115 8116 bool Increasing; 8117 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8118 return false; 8119 8120 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8121 // true as the loop iterates, and the backedge is control dependent on 8122 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8123 // 8124 // * if the predicate was false in the first iteration then the predicate 8125 // is never evaluated again, since the loop exits without taking the 8126 // backedge. 8127 // * if the predicate was true in the first iteration then it will 8128 // continue to be true for all future iterations since it is 8129 // monotonically increasing. 8130 // 8131 // For both the above possibilities, we can replace the loop varying 8132 // predicate with its value on the first iteration of the loop (which is 8133 // loop invariant). 8134 // 8135 // A similar reasoning applies for a monotonically decreasing predicate, by 8136 // replacing true with false and false with true in the above two bullets. 8137 8138 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8139 8140 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8141 return false; 8142 8143 InvariantPred = Pred; 8144 InvariantLHS = ArLHS->getStart(); 8145 InvariantRHS = RHS; 8146 return true; 8147 } 8148 8149 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8150 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8151 if (HasSameValue(LHS, RHS)) 8152 return ICmpInst::isTrueWhenEqual(Pred); 8153 8154 // This code is split out from isKnownPredicate because it is called from 8155 // within isLoopEntryGuardedByCond. 
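  // For illustration: if getUnsignedRange(LHS) = [0, 10) and
  // getUnsignedRange(RHS) = [10, 20), then makeSatisfyingICmpRegion(ULT,
  // [10, 20)) = [0, 10) contains the whole range of LHS, so `LHS u< RHS` is
  // proved from the ranges alone by the lambda below.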
8156 
8157   auto CheckRanges =
8158       [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
8159     return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
8160         .contains(RangeLHS);
8161   };
8162 
8163   // The check at the top of the function catches the case where the values are
8164   // known to be equal.
8165   if (Pred == CmpInst::ICMP_EQ)
8166     return false;
8167 
8168   if (Pred == CmpInst::ICMP_NE)
8169     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
8170            CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
8171            isKnownNonZero(getMinusSCEV(LHS, RHS));
8172 
8173   if (CmpInst::isSigned(Pred))
8174     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
8175 
8176   return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
8177 }
8178 
8179 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
8180                                                     const SCEV *LHS,
8181                                                     const SCEV *RHS) {
8182 
8183   // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
8184   // Return Y via OutY.
8185   auto MatchBinaryAddToConst =
8186       [this](const SCEV *Result, const SCEV *X, APInt &OutY,
8187              SCEV::NoWrapFlags ExpectedFlags) {
8188     const SCEV *NonConstOp, *ConstOp;
8189     SCEV::NoWrapFlags FlagsPresent;
8190 
8191     if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
8192         !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
8193       return false;
8194 
8195     OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
8196     return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
8197   };
8198 
8199   APInt C;
8200 
8201   switch (Pred) {
8202   default:
8203     break;
8204 
8205   case ICmpInst::ICMP_SGE:
8206     std::swap(LHS, RHS);
8207     LLVM_FALLTHROUGH;
8208   case ICmpInst::ICMP_SLE:
8209     // X s<= (X + C)<nsw> if C >= 0
8210     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
8211       return true;
8212 
8213     // (X + C)<nsw> s<= X if C <= 0
8214     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
8215         !C.isStrictlyPositive())
8216       return true;
8217     break;
8218 
8219   case ICmpInst::ICMP_SGT:
8220     std::swap(LHS, RHS);
8221     LLVM_FALLTHROUGH;
8222   case ICmpInst::ICMP_SLT:
8223     // X s< (X + C)<nsw> if C > 0
8224     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
8225         C.isStrictlyPositive())
8226       return true;
8227 
8228     // (X + C)<nsw> s< X if C < 0
8229     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
8230       return true;
8231     break;
8232   }
8233 
8234   return false;
8235 }
8236 
8237 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
8238                                                    const SCEV *LHS,
8239                                                    const SCEV *RHS) {
8240   if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
8241     return false;
8242 
8243   // Allowing an arbitrary number of activations of
8244   // isKnownPredicateViaSplitting on the stack can result in exponential time
8245   // complexity.
8246   SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
8247 
8248   // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
8249   //
8250   // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
8251   // isKnownPredicate. isKnownPredicate is more powerful, but also more
8252   // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
8253   // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
8254   // use isKnownPredicate later if needed.
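  // Sketch of why the split is sound: for RHS = L with L >= 0, the unsigned
  // test I u< L and the signed pair (I s>= 0, I s< L) agree, because both
  // carve out exactly the interval [0, L); the conjunction returned below
  // checks the signed pair.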
8254   return isKnownNonNegative(RHS) &&
8255          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
8256          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
8257 }
8258 
8259 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
8260                                         ICmpInst::Predicate Pred,
8261                                         const SCEV *LHS, const SCEV *RHS) {
8262   // No need to even try if we know the module has no guards.
8263   if (!HasGuards)
8264     return false;
8265 
8266   return any_of(*BB, [&](Instruction &I) {
8267     using namespace llvm::PatternMatch;
8268 
8269     Value *Condition;
8270     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
8271                          m_Value(Condition))) &&
8272            isImpliedCond(Pred, LHS, RHS, Condition, false);
8273   });
8274 }
8275 
8276 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
8277 /// protected by a conditional between LHS and RHS. This is used
8278 /// to eliminate casts.
8279 bool
8280 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
8281                                              ICmpInst::Predicate Pred,
8282                                              const SCEV *LHS, const SCEV *RHS) {
8283   // Interpret a null as meaning no loop, where there is obviously no guard
8284   // (interprocedural conditions notwithstanding).
8285   if (!L) return true;
8286 
8287   if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
8288     return true;
8289 
8290   BasicBlock *Latch = L->getLoopLatch();
8291   if (!Latch)
8292     return false;
8293 
8294   BranchInst *LoopContinuePredicate =
8295       dyn_cast<BranchInst>(Latch->getTerminator());
8296   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
8297       isImpliedCond(Pred, LHS, RHS,
8298                     LoopContinuePredicate->getCondition(),
8299                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
8300     return true;
8301 
8302   // We don't want more than one activation of the following loops on the stack
8303   // -- that can lead to O(n!) time complexity.
8304   if (WalkingBEDominatingConds)
8305     return false;
8306 
8307   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
8308 
8309   // See if we can exploit a trip count to prove the predicate.
8310   const auto &BETakenInfo = getBackedgeTakenInfo(L);
8311   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
8312   if (LatchBECount != getCouldNotCompute()) {
8313     // We know that Latch branches back to the loop header exactly
8314     // LatchBECount times. This means the backedge condition at Latch is
8315     // equivalent to "{0,+,1} u< LatchBECount".
8316     Type *Ty = LatchBECount->getType();
8317     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
8318     const SCEV *LoopCounter =
8319         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
8320     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
8321                       LatchBECount))
8322       return true;
8323   }
8324 
8325   // Check conditions due to any @llvm.assume intrinsics.
8326   for (auto &AssumeVH : AC.assumptions()) {
8327     if (!AssumeVH)
8328       continue;
8329     auto *CI = cast<CallInst>(AssumeVH);
8330     if (!DT.dominates(CI, Latch->getTerminator()))
8331       continue;
8332 
8333     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
8334       return true;
8335   }
8336 
8337   // If the loop is not reachable from the entry block, we risk running into an
8338   // infinite loop as we walk up into the dom tree. These loops do not matter
8339   // anyway, so we just return a conservative answer when we see them.
8340 if (!DT.isReachableFromEntry(L->getHeader())) 8341 return false; 8342 8343 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 8344 return true; 8345 8346 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 8347 DTN != HeaderDTN; DTN = DTN->getIDom()) { 8348 8349 assert(DTN && "should reach the loop header before reaching the root!"); 8350 8351 BasicBlock *BB = DTN->getBlock(); 8352 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 8353 return true; 8354 8355 BasicBlock *PBB = BB->getSinglePredecessor(); 8356 if (!PBB) 8357 continue; 8358 8359 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 8360 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 8361 continue; 8362 8363 Value *Condition = ContinuePredicate->getCondition(); 8364 8365 // If we have an edge `E` within the loop body that dominates the only 8366 // latch, the condition guarding `E` also guards the backedge. This 8367 // reasoning works only for loops with a single latch. 8368 8369 BasicBlockEdge DominatingEdge(PBB, BB); 8370 if (DominatingEdge.isSingleEdge()) { 8371 // We're constructively (and conservatively) enumerating edges within the 8372 // loop body that dominate the latch. The dominator tree better agree 8373 // with us on this: 8374 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 8375 8376 if (isImpliedCond(Pred, LHS, RHS, Condition, 8377 BB != ContinuePredicate->getSuccessor(0))) 8378 return true; 8379 } 8380 } 8381 8382 return false; 8383 } 8384 8385 bool 8386 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 8387 ICmpInst::Predicate Pred, 8388 const SCEV *LHS, const SCEV *RHS) { 8389 // Interpret a null as meaning no loop, where there is obviously no guard 8390 // (interprocedural conditions notwithstanding). 8391 if (!L) return false; 8392 8393 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 8394 return true; 8395 8396 // Starting at the loop predecessor, climb up the predecessor chain, as long 8397 // as there are predecessors that can be found that have unique successors 8398 // leading to the original header. 8399 for (std::pair<BasicBlock *, BasicBlock *> 8400 Pair(L->getLoopPredecessor(), L->getHeader()); 8401 Pair.first; 8402 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 8403 8404 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 8405 return true; 8406 8407 BranchInst *LoopEntryPredicate = 8408 dyn_cast<BranchInst>(Pair.first->getTerminator()); 8409 if (!LoopEntryPredicate || 8410 LoopEntryPredicate->isUnconditional()) 8411 continue; 8412 8413 if (isImpliedCond(Pred, LHS, RHS, 8414 LoopEntryPredicate->getCondition(), 8415 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8416 return true; 8417 } 8418 8419 // Check conditions due to any @llvm.assume intrinsics. 8420 for (auto &AssumeVH : AC.assumptions()) { 8421 if (!AssumeVH) 8422 continue; 8423 auto *CI = cast<CallInst>(AssumeVH); 8424 if (!DT.dominates(CI, L->getHeader())) 8425 continue; 8426 8427 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8428 return true; 8429 } 8430 8431 return false; 8432 } 8433 8434 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8435 const SCEV *LHS, const SCEV *RHS, 8436 Value *FoundCondValue, 8437 bool Inverse) { 8438 if (!PendingLoopPredicates.insert(FoundCondValue).second) 8439 return false; 8440 8441 auto ClearOnExit = 8442 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 8443 8444 // Recursively handle And and Or conditions. 
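  // Note on the asymmetry below: if a dominating condition `(a && b)` is known
  // true, then each of `a` and `b` is individually true and may separately
  // imply the predicate; dually, if `(a || b)` is known false (the Inverse
  // case), each disjunct is false. Hence And is split only when !Inverse and
  // Or only when Inverse.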
8445   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
8446     if (BO->getOpcode() == Instruction::And) {
8447       if (!Inverse)
8448         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
8449                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
8450     } else if (BO->getOpcode() == Instruction::Or) {
8451       if (Inverse)
8452         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
8453                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
8454     }
8455   }
8456 
8457   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
8458   if (!ICI) return false;
8459 
8460   // We have found a conditional branch that dominates the loop or controls
8461   // the loop latch. Check to see if it is the comparison we are looking for.
8462   ICmpInst::Predicate FoundPred;
8463   if (Inverse)
8464     FoundPred = ICI->getInversePredicate();
8465   else
8466     FoundPred = ICI->getPredicate();
8467 
8468   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
8469   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
8470 
8471   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
8472 }
8473 
8474 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
8475                                     const SCEV *RHS,
8476                                     ICmpInst::Predicate FoundPred,
8477                                     const SCEV *FoundLHS,
8478                                     const SCEV *FoundRHS) {
8479   // Balance the types.
8480   if (getTypeSizeInBits(LHS->getType()) <
8481       getTypeSizeInBits(FoundLHS->getType())) {
8482     if (CmpInst::isSigned(Pred)) {
8483       LHS = getSignExtendExpr(LHS, FoundLHS->getType());
8484       RHS = getSignExtendExpr(RHS, FoundLHS->getType());
8485     } else {
8486       LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
8487       RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
8488     }
8489   } else if (getTypeSizeInBits(LHS->getType()) >
8490              getTypeSizeInBits(FoundLHS->getType())) {
8491     if (CmpInst::isSigned(FoundPred)) {
8492       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
8493       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
8494     } else {
8495       FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
8496       FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
8497     }
8498   }
8499 
8500   // Canonicalize the query to match the way instcombine will have
8501   // canonicalized the comparison.
8502   if (SimplifyICmpOperands(Pred, LHS, RHS))
8503     if (LHS == RHS)
8504       return CmpInst::isTrueWhenEqual(Pred);
8505   if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
8506     if (FoundLHS == FoundRHS)
8507       return CmpInst::isFalseWhenEqual(FoundPred);
8508 
8509   // Check to see if we can make the LHS or RHS match.
8510   if (LHS == FoundRHS || RHS == FoundLHS) {
8511     if (isa<SCEVConstant>(RHS)) {
8512       std::swap(FoundLHS, FoundRHS);
8513       FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
8514     } else {
8515       std::swap(LHS, RHS);
8516       Pred = ICmpInst::getSwappedPredicate(Pred);
8517     }
8518   }
8519 
8520   // Check whether the found predicate is the same as the desired predicate.
8521   if (FoundPred == Pred)
8522     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
8523 
8524   // Check whether swapping the found predicate makes it the same as the
8525   // desired predicate.
8526   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
8527     if (isa<SCEVConstant>(RHS))
8528       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
8529     else
8530       return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
8531                                    RHS, LHS, FoundLHS, FoundRHS);
8532   }
8533 
8534   // Unsigned comparison is the same as signed comparison when both operands
8535   // are non-negative.
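  // E.g., a dominating check `X u< Y` with X and Y both known non-negative
  // proves `X s< Y` as well, since u< and s< coincide on the non-negative
  // half of the signed range.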
8536   if (CmpInst::isUnsigned(FoundPred) &&
8537       CmpInst::getSignedPredicate(FoundPred) == Pred &&
8538       isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
8539     return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
8540 
8541   // Check if we can make progress by sharpening ranges.
8542   if (FoundPred == ICmpInst::ICMP_NE &&
8543       (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
8544 
8545     const SCEVConstant *C = nullptr;
8546     const SCEV *V = nullptr;
8547 
8548     if (isa<SCEVConstant>(FoundLHS)) {
8549       C = cast<SCEVConstant>(FoundLHS);
8550       V = FoundRHS;
8551     } else {
8552       C = cast<SCEVConstant>(FoundRHS);
8553       V = FoundLHS;
8554     }
8555 
8556     // The guarding predicate tells us that C != V. If the known range
8557     // of V is [C, t), we can sharpen the range to [C + 1, t). The
8558     // range we consider has to correspond to the same signedness as the
8559     // predicate we're interested in folding.
8560 
8561     APInt Min = ICmpInst::isSigned(Pred) ?
8562         getSignedRangeMin(V) : getUnsignedRangeMin(V);
8563 
8564     if (Min == C->getAPInt()) {
8565       // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
8566       // This is true even if (Min + 1) wraps around -- in case of
8567       // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
8568 
8569       APInt SharperMin = Min + 1;
8570 
8571       switch (Pred) {
8572       case ICmpInst::ICMP_SGE:
8573       case ICmpInst::ICMP_UGE:
8574         // We know V `Pred` SharperMin. If this implies LHS `Pred`
8575         // RHS, we're done.
8576         if (isImpliedCondOperands(Pred, LHS, RHS, V,
8577                                   getConstant(SharperMin)))
8578           return true;
8579         LLVM_FALLTHROUGH;
8580 
8581       case ICmpInst::ICMP_SGT:
8582       case ICmpInst::ICMP_UGT:
8583         // We know from the range information that (V `Pred` Min ||
8584         // V == Min). We know from the guarding condition that !(V
8585         // == Min). This gives us
8586         //
8587         //   V `Pred` Min || V == Min && !(V == Min)
8588         //   => V `Pred` Min
8589         //
8590         // If V `Pred` Min implies LHS `Pred` RHS, we're done.
8591 
8592         if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
8593           return true;
8594         LLVM_FALLTHROUGH;
8595 
8596       default:
8597         // No change
8598         break;
8599       }
8600     }
8601   }
8602 
8603   // Check whether the found condition is stronger than strictly needed.
8604   if (FoundPred == ICmpInst::ICMP_EQ)
8605     if (ICmpInst::isTrueWhenEqual(Pred))
8606       if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
8607         return true;
8608   if (Pred == ICmpInst::ICMP_NE)
8609     if (!ICmpInst::isTrueWhenEqual(FoundPred))
8610       if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
8611         return true;
8612 
8613   // Otherwise assume the worst.
8614   return false;
8615 }
8616 
8617 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
8618                                      const SCEV *&L, const SCEV *&R,
8619                                      SCEV::NoWrapFlags &Flags) {
8620   const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
8621   if (!AE || AE->getNumOperands() != 2)
8622     return false;
8623 
8624   L = AE->getOperand(0);
8625   R = AE->getOperand(1);
8626   Flags = AE->getNoWrapFlags();
8627   return true;
8628 }
8629 
8630 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
8631                                                            const SCEV *Less) {
8632   // We avoid subtracting expressions here because this function is usually
8633   // fairly deep in the call stack (i.e., it is called many times).
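  // For example: computeConstantDifference(X + 5, X) recognizes the add
  // syntactically via splitBinaryAdd and returns 5 without building a
  // subtraction; the addrec case below likewise strips matching steps and
  // then compares the start values.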
8634 8635 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 8636 const auto *LAR = cast<SCEVAddRecExpr>(Less); 8637 const auto *MAR = cast<SCEVAddRecExpr>(More); 8638 8639 if (LAR->getLoop() != MAR->getLoop()) 8640 return None; 8641 8642 // We look at affine expressions only; not for correctness but to keep 8643 // getStepRecurrence cheap. 8644 if (!LAR->isAffine() || !MAR->isAffine()) 8645 return None; 8646 8647 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 8648 return None; 8649 8650 Less = LAR->getStart(); 8651 More = MAR->getStart(); 8652 8653 // fall through 8654 } 8655 8656 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 8657 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 8658 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 8659 return M - L; 8660 } 8661 8662 const SCEV *L, *R; 8663 SCEV::NoWrapFlags Flags; 8664 if (splitBinaryAdd(Less, L, R, Flags)) 8665 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8666 if (R == More) 8667 return -(LC->getAPInt()); 8668 8669 if (splitBinaryAdd(More, L, R, Flags)) 8670 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 8671 if (R == Less) 8672 return LC->getAPInt(); 8673 8674 return None; 8675 } 8676 8677 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 8678 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 8679 const SCEV *FoundLHS, const SCEV *FoundRHS) { 8680 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 8681 return false; 8682 8683 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8684 if (!AddRecLHS) 8685 return false; 8686 8687 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 8688 if (!AddRecFoundLHS) 8689 return false; 8690 8691 // We'd like to let SCEV reason about control dependencies, so we constrain 8692 // both the inequalities to be about add recurrences on the same loop. This 8693 // way we can use isLoopEntryGuardedByCond later. 8694 8695 const Loop *L = AddRecFoundLHS->getLoop(); 8696 if (L != AddRecLHS->getLoop()) 8697 return false; 8698 8699 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 8700 // 8701 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 8702 // ... (2) 8703 // 8704 // Informal proof for (2), assuming (1) [*]: 8705 // 8706 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 8707 // 8708 // Then 8709 // 8710 // FoundLHS s< FoundRHS s< INT_MIN - C 8711 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 8712 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 8713 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 8714 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 8715 // <=> FoundLHS + C s< FoundRHS + C 8716 // 8717 // [*]: (1) can be proved by ruling out overflow. 8718 // 8719 // [**]: This can be proved by analyzing all the four possibilities: 8720 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 8721 // (A s>= 0, B s>= 0). 8722 // 8723 // Note: 8724 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 8725 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 8726 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 8727 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 8728 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 8729 // C)". 
8730 8731 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 8732 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 8733 if (!LDiff || !RDiff || *LDiff != *RDiff) 8734 return false; 8735 8736 if (LDiff->isMinValue()) 8737 return true; 8738 8739 APInt FoundRHSLimit; 8740 8741 if (Pred == CmpInst::ICMP_ULT) { 8742 FoundRHSLimit = -(*RDiff); 8743 } else { 8744 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 8745 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 8746 } 8747 8748 // Try to prove (1) or (2), as needed. 8749 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 8750 getConstant(FoundRHSLimit)); 8751 } 8752 8753 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 8754 const SCEV *LHS, const SCEV *RHS, 8755 const SCEV *FoundLHS, 8756 const SCEV *FoundRHS) { 8757 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8758 return true; 8759 8760 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 8761 return true; 8762 8763 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 8764 FoundLHS, FoundRHS) || 8765 // ~x < ~y --> x > y 8766 isImpliedCondOperandsHelper(Pred, LHS, RHS, 8767 getNotSCEV(FoundRHS), 8768 getNotSCEV(FoundLHS)); 8769 } 8770 8771 8772 /// If Expr computes ~A, return A else return nullptr 8773 static const SCEV *MatchNotExpr(const SCEV *Expr) { 8774 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 8775 if (!Add || Add->getNumOperands() != 2 || 8776 !Add->getOperand(0)->isAllOnesValue()) 8777 return nullptr; 8778 8779 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 8780 if (!AddRHS || AddRHS->getNumOperands() != 2 || 8781 !AddRHS->getOperand(0)->isAllOnesValue()) 8782 return nullptr; 8783 8784 return AddRHS->getOperand(1); 8785 } 8786 8787 8788 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 8789 template<typename MaxExprType> 8790 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 8791 const SCEV *Candidate) { 8792 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 8793 if (!MaxExpr) return false; 8794 8795 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 8796 } 8797 8798 8799 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 8800 template<typename MaxExprType> 8801 static bool IsMinConsistingOf(ScalarEvolution &SE, 8802 const SCEV *MaybeMinExpr, 8803 const SCEV *Candidate) { 8804 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 8805 if (!MaybeMaxExpr) 8806 return false; 8807 8808 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 8809 } 8810 8811 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 8812 ICmpInst::Predicate Pred, 8813 const SCEV *LHS, const SCEV *RHS) { 8814 8815 // If both sides are affine addrecs for the same loop, with equal 8816 // steps, and we know the recurrences don't wrap, then we only 8817 // need to check the predicate on the starting values. 
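  // Example: {2,+,1}<nsw> s< {7,+,1}<nsw> over the same loop reduces to
  // 2 s< 7, since both recurrences advance in lockstep and neither can wrap
  // in the signed sense; the checks below establish exactly those side
  // conditions.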
8818 
8819   if (!ICmpInst::isRelational(Pred))
8820     return false;
8821 
8822   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
8823   if (!LAR)
8824     return false;
8825   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
8826   if (!RAR)
8827     return false;
8828   if (LAR->getLoop() != RAR->getLoop())
8829     return false;
8830   if (!LAR->isAffine() || !RAR->isAffine())
8831     return false;
8832 
8833   if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
8834     return false;
8835 
8836   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
8837                          SCEV::FlagNSW : SCEV::FlagNUW;
8838   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
8839     return false;
8840 
8841   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
8842 }
8843 
8844 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
8845 /// expression?
8846 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
8847                                         ICmpInst::Predicate Pred,
8848                                         const SCEV *LHS, const SCEV *RHS) {
8849   switch (Pred) {
8850   default:
8851     return false;
8852 
8853   case ICmpInst::ICMP_SGE:
8854     std::swap(LHS, RHS);
8855     LLVM_FALLTHROUGH;
8856   case ICmpInst::ICMP_SLE:
8857     return
8858         // min(A, ...) <= A
8859         IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
8860         // A <= max(A, ...)
8861         IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
8862 
8863   case ICmpInst::ICMP_UGE:
8864     std::swap(LHS, RHS);
8865     LLVM_FALLTHROUGH;
8866   case ICmpInst::ICMP_ULE:
8867     return
8868         // min(A, ...) <= A
8869         IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
8870         // A <= max(A, ...)
8871         IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
8872   }
8873 
8874   llvm_unreachable("covered switch fell through?!");
8875 }
8876 
8877 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
8878                                              const SCEV *LHS, const SCEV *RHS,
8879                                              const SCEV *FoundLHS,
8880                                              const SCEV *FoundRHS,
8881                                              unsigned Depth) {
8882   assert(getTypeSizeInBits(LHS->getType()) ==
8883              getTypeSizeInBits(RHS->getType()) &&
8884          "LHS and RHS have different sizes?");
8885   assert(getTypeSizeInBits(FoundLHS->getType()) ==
8886              getTypeSizeInBits(FoundRHS->getType()) &&
8887          "FoundLHS and FoundRHS have different sizes?");
8888   // We want to avoid hurting the compile time with analysis of too big trees.
8889   if (Depth > MaxSCEVOperationsImplicationDepth)
8890     return false;
8891   // We only want to work with the ICMP_SGT comparison so far.
8892   // TODO: Extend to ICMP_UGT?
8893   if (Pred == ICmpInst::ICMP_SLT) {
8894     Pred = ICmpInst::ICMP_SGT;
8895     std::swap(LHS, RHS);
8896     std::swap(FoundLHS, FoundRHS);
8897   }
8898   if (Pred != ICmpInst::ICMP_SGT)
8899     return false;
8900 
8901   auto GetOpFromSExt = [&](const SCEV *S) {
8902     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
8903       return Ext->getOperand();
8904     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
8905     // the constant in some cases.
8906     return S;
8907   };
8908 
8909   // Acquire values from extensions.
8910   auto *OrigFoundLHS = FoundLHS;
8911   LHS = GetOpFromSExt(LHS);
8912   FoundLHS = GetOpFromSExt(FoundLHS);
8913 
8914   // Check if the SGT predicate can be proved trivially or using the found context.
8915   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
8916     return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
8917            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
8918                                   FoundRHS, Depth + 1);
8919   };
8920 
8921   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
8922     // We want to avoid creation of any new non-constant SCEV. Since we are
8923     // going to compare the operands to RHS, we should be certain that we don't
8924     // need any size extensions for this. So let's decline all cases when the
8925     // sizes of types of LHS and RHS do not match.
8926     // TODO: Maybe try to get RHS from sext to catch more cases?
8927     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
8928       return false;
8929 
8930     // Should not overflow.
8931     if (!LHSAddExpr->hasNoSignedWrap())
8932       return false;
8933 
8934     auto *LL = LHSAddExpr->getOperand(0);
8935     auto *LR = LHSAddExpr->getOperand(1);
8936     auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
8937 
8938     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
8939     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
8940       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
8941     };
8942     // Try to prove the following rule:
8943     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
8944     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
8945     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
8946       return true;
8947   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
8948     Value *LL, *LR;
8949     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
8950     using namespace llvm::PatternMatch;
8951     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
8952       // Rules for division.
8953       // We are going to perform some comparisons with Denominator and its
8954       // derivative expressions. In the general case, creating a SCEV for it
8955       // may lead to a complex analysis of the entire graph, and in particular
8956       // it can request trip count recalculation for the same loop. Such a
8957       // query would be cached as SCEVCouldNotCompute to break the infinite
8958       // recursion. To avoid this, we only create SCEVs that are constants in
8959       // this section. So we bail out if Denominator is not a constant.
8960       if (!isa<ConstantInt>(LR))
8961         return false;
8962 
8963       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
8964 
8965       // We want to make sure that LHS = FoundLHS / Denominator. If so, a SCEV
8966       // for the numerator already exists and matches FoundLHS.
8967       auto *Numerator = getExistingSCEV(LL);
8968       if (!Numerator || Numerator->getType() != FoundLHS->getType())
8969         return false;
8970 
8971       // Make sure that the numerator matches with FoundLHS and the denominator
8972       // is positive.
8973       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
8974         return false;
8975 
8976       auto *DTy = Denominator->getType();
8977       auto *FRHSTy = FoundRHS->getType();
8978       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
8979         // One of the types is a pointer and the other one is not. We cannot
8980         // extend them properly to a wider type, so let us just reject this case.
8981         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
8982         // to avoid this check.
8983         return false;
8984 
8985       // Given that:
8986       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
8987       auto *WTy = getWiderType(DTy, FRHSTy);
8988       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
8989       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
8990 
8991       // Try to prove the following rule:
8992       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
8993       // For example, if FoundLHS > 2, then FoundLHS is at least 3, and
8994       // dividing it by a Denominator < 4 leaves at least 1.
8995       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
8996       if (isKnownNonPositive(RHS) &&
8997           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
8998         return true;
8999 
9000       // Try to prove the following rule:
9001       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
9002       // For example, if FoundLHS > -3, then FoundLHS is at least -2.
9003       // If we divide it by Denominator > 2, then:
9004       // 1. If FoundLHS is negative, then the result is 0.
9005       // 2. If FoundLHS is non-negative, then the result is non-negative.
9006       // Either way, the result is non-negative.
9007       auto *MinusOne = getNegativeSCEV(getOne(WTy));
9008       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
9009       if (isKnownNegative(RHS) &&
9010           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
9011         return true;
9012     }
9013   }
9014 
9015   return false;
9016 }
9017 
9018 bool
9019 ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
9020                                            const SCEV *LHS, const SCEV *RHS) {
9021   return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
9022          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
9023          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
9024          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
9025 }
9026 
9027 bool
9028 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
9029                                              const SCEV *LHS, const SCEV *RHS,
9030                                              const SCEV *FoundLHS,
9031                                              const SCEV *FoundRHS) {
9032   switch (Pred) {
9033   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
9034   case ICmpInst::ICMP_EQ:
9035   case ICmpInst::ICMP_NE:
9036     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
9037       return true;
9038     break;
9039   case ICmpInst::ICMP_SLT:
9040   case ICmpInst::ICMP_SLE:
9041     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
9042         isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
9043       return true;
9044     break;
9045   case ICmpInst::ICMP_SGT:
9046   case ICmpInst::ICMP_SGE:
9047     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
9048         isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
9049       return true;
9050     break;
9051   case ICmpInst::ICMP_ULT:
9052   case ICmpInst::ICMP_ULE:
9053     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
9054         isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
9055       return true;
9056     break;
9057   case ICmpInst::ICMP_UGT:
9058   case ICmpInst::ICMP_UGE:
9059     if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
9060         isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
9061       return true;
9062     break;
9063   }
9064 
9065   // Maybe it can be proved via operations?
9066   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
9067     return true;
9068 
9069   return false;
9070 }
9071 
9072 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
9073                                                      const SCEV *LHS,
9074                                                      const SCEV *RHS,
9075                                                      const SCEV *FoundLHS,
9076                                                      const SCEV *FoundRHS) {
9077   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
9078     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
9079     // reduce the compile time impact of this optimization.
9080     return false;
9081 
9082   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
9083   if (!Addend)
9084     return false;
9085 
9086   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
9087 
9088   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
9089   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
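  // Worked example: from the antecedent FoundLHS u< 8, FoundLHSRange below is
  // [0, 8); with Addend = 2, LHSRange = [2, 10), and the satisfying region of
  // the consequent `LHS u< 10` (namely [0, 10)) contains it, so the
  // implication is proved.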
9090 ConstantRange FoundLHSRange = 9091 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9092 9093 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9094 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9095 9096 // We can also compute the range of values for `LHS` that satisfy the 9097 // consequent, "`LHS` `Pred` `RHS`": 9098 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9099 ConstantRange SatisfyingLHSRange = 9100 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9101 9102 // The antecedent implies the consequent if every value of `LHS` that 9103 // satisfies the antecedent also satisfies the consequent. 9104 return SatisfyingLHSRange.contains(LHSRange); 9105 } 9106 9107 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9108 bool IsSigned, bool NoWrap) { 9109 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9110 9111 if (NoWrap) return false; 9112 9113 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9114 const SCEV *One = getOne(Stride->getType()); 9115 9116 if (IsSigned) { 9117 APInt MaxRHS = getSignedRangeMax(RHS); 9118 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9119 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9120 9121 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9122 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9123 } 9124 9125 APInt MaxRHS = getUnsignedRangeMax(RHS); 9126 APInt MaxValue = APInt::getMaxValue(BitWidth); 9127 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9128 9129 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9130 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9131 } 9132 9133 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9134 bool IsSigned, bool NoWrap) { 9135 if (NoWrap) return false; 9136 9137 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9138 const SCEV *One = getOne(Stride->getType()); 9139 9140 if (IsSigned) { 9141 APInt MinRHS = getSignedRangeMin(RHS); 9142 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9143 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9144 9145 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9146 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9147 } 9148 9149 APInt MinRHS = getUnsignedRangeMin(RHS); 9150 APInt MinValue = APInt::getMinValue(BitWidth); 9151 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9152 9153 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9154 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9155 } 9156 9157 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9158 bool Equality) { 9159 const SCEV *One = getOne(Step->getType()); 9160 Delta = Equality ? 
getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}
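// A quick sanity check of the formula above, with hypothetical values: for
// Delta == 10, Step == 3 and Equality == false, the result is
// (10 + (3 - 1)) /u 3 == 4, i.e. ceil(Delta / Step) backedge takes.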
ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV < Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop has a single exit and no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single-trip loop. The backedge taken count formula reduces to zero in
    // this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero,
    // otherwise we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for (i = 127; i < 128; i += 129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();

  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where
  // Start is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so
  // the result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    // Calculate the maximum backedge count based on the range of values
    // permitted by Start, End, and Stride.
    APInt MinStart = IsSigned ? getSignedRangeMin(Start)
                              : getUnsignedRangeMin(Start);

    unsigned BitWidth = getTypeSizeInBits(LHS->getType());

    APInt StrideForMaxBECount;

    if (PositiveStride)
      StrideForMaxBECount =
          IsSigned ? getSignedRangeMin(Stride)
                   : getUnsignedRangeMin(Stride);
    else
      // Using a stride of 1 is safe when computing max backedge taken count
      // for a loop with unknown stride.
      StrideForMaxBECount = APInt(BitWidth, 1, IsSigned);

    APInt Limit =
        IsSigned
            ? APInt::getSignedMaxValue(BitWidth) - (StrideForMaxBECount - 1)
            : APInt::getMaxValue(BitWidth) - (StrideForMaxBECount - 1);

    // Although End can be a MAX expression we estimate MaxEnd considering
    // only the case End = RHS. This is safe because in the other case
    // (End - Start) is zero, leading to a zero maximum backedge taken count.
    APInt MaxEnd =
        IsSigned ? APIntOps::smin(getSignedRangeMax(RHS), Limit)
                 : APIntOps::umin(getUnsignedRangeMax(RHS), Limit);

    MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
                                getConstant(StrideForMaxBECount), false);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
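// howManyGreaterThans below is the mirror image of howManyLessThans above:
// the IV counts down towards RHS, so the step recurrence is negated to get a
// positive Stride, and MIN expressions take the place of the MAX expressions.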
ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ?
APIntOps::smax(getSignedRangeMin(RHS), Limit) 9389 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 9390 9391 9392 const SCEV *MaxBECount = getCouldNotCompute(); 9393 if (isa<SCEVConstant>(BECount)) 9394 MaxBECount = BECount; 9395 else 9396 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 9397 getConstant(MinStride), false); 9398 9399 if (isa<SCEVCouldNotCompute>(MaxBECount)) 9400 MaxBECount = BECount; 9401 9402 return ExitLimit(BECount, MaxBECount, false, Predicates); 9403 } 9404 9405 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 9406 ScalarEvolution &SE) const { 9407 if (Range.isFullSet()) // Infinite loop. 9408 return SE.getCouldNotCompute(); 9409 9410 // If the start is a non-zero constant, shift the range to simplify things. 9411 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 9412 if (!SC->getValue()->isZero()) { 9413 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 9414 Operands[0] = SE.getZero(SC->getType()); 9415 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 9416 getNoWrapFlags(FlagNW)); 9417 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 9418 return ShiftedAddRec->getNumIterationsInRange( 9419 Range.subtract(SC->getAPInt()), SE); 9420 // This is strange and shouldn't happen. 9421 return SE.getCouldNotCompute(); 9422 } 9423 9424 // The only time we can solve this is when we have all constant indices. 9425 // Otherwise, we cannot determine the overflow conditions. 9426 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 9427 return SE.getCouldNotCompute(); 9428 9429 // Okay at this point we know that all elements of the chrec are constants and 9430 // that the start element is zero. 9431 9432 // First check to see if the range contains zero. If not, the first 9433 // iteration exits. 9434 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 9435 if (!Range.contains(APInt(BitWidth, 0))) 9436 return SE.getZero(getType()); 9437 9438 if (isAffine()) { 9439 // If this is an affine expression then we have this situation: 9440 // Solve {0,+,A} in Range === Ax in Range 9441 9442 // We know that zero is in the range. If A is positive then we know that 9443 // the upper value of the range must be the first possible exit value. 9444 // If A is negative then the lower of the range is the last possible loop 9445 // value. Also note that we already checked for a full range. 9446 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 9447 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 9448 9449 // The exit value should be (End+A)/A. 9450 APInt ExitVal = (End + A).udiv(A); 9451 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 9452 9453 // Evaluate at the exit value. If we really did fall out of the valid 9454 // range, then we computed our trip count, otherwise wrap around or other 9455 // things must have happened. 9456 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 9457 if (Range.contains(Val->getValue())) 9458 return SE.getCouldNotCompute(); // Something strange happened 9459 9460 // Ensure that the previous value is in the range. This is a sanity check. 
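    // (Hypothetical numbers: for {0,+,3} and Range == [0, 10), End == 9 and
    // ExitVal == (9 + 3) /u 3 == 4; evaluating the chrec gives 12 at
    // iteration 4, outside the range, and 9 at iteration 3, still inside,
    // which is exactly what the assertion below verifies.)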
9461 assert(Range.contains( 9462 EvaluateConstantChrecAtConstant(this, 9463 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 9464 "Linear scev computation is off in a bad way!"); 9465 return SE.getConstant(ExitValue); 9466 } else if (isQuadratic()) { 9467 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 9468 // quadratic equation to solve it. To do this, we must frame our problem in 9469 // terms of figuring out when zero is crossed, instead of when 9470 // Range.getUpper() is crossed. 9471 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 9472 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 9473 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap); 9474 9475 // Next, solve the constructed addrec 9476 if (auto Roots = 9477 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) { 9478 const SCEVConstant *R1 = Roots->first; 9479 const SCEVConstant *R2 = Roots->second; 9480 // Pick the smallest positive root value. 9481 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 9482 ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 9483 if (!CB->getZExtValue()) 9484 std::swap(R1, R2); // R1 is the minimum root now. 9485 9486 // Make sure the root is not off by one. The returned iteration should 9487 // not be in the range, but the previous one should be. When solving 9488 // for "X*X < 5", for example, we should not return a root of 2. 9489 ConstantInt *R1Val = 9490 EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); 9491 if (Range.contains(R1Val->getValue())) { 9492 // The next iteration must be out of the range... 9493 ConstantInt *NextVal = 9494 ConstantInt::get(SE.getContext(), R1->getAPInt() + 1); 9495 9496 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 9497 if (!Range.contains(R1Val->getValue())) 9498 return SE.getConstant(NextVal); 9499 return SE.getCouldNotCompute(); // Something strange happened 9500 } 9501 9502 // If R1 was not in the range, then it is a good return value. Make 9503 // sure that R1-1 WAS in the range though, just in case. 9504 ConstantInt *NextVal = 9505 ConstantInt::get(SE.getContext(), R1->getAPInt() - 1); 9506 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 9507 if (Range.contains(R1Val->getValue())) 9508 return R1; 9509 return SE.getCouldNotCompute(); // Something strange happened 9510 } 9511 } 9512 } 9513 9514 return SE.getCouldNotCompute(); 9515 } 9516 9517 // Return true when S contains at least an undef value. 9518 static inline bool containsUndefs(const SCEV *S) { 9519 return SCEVExprContains(S, [](const SCEV *S) { 9520 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 9521 return isa<UndefValue>(SU->getValue()); 9522 else if (const auto *SC = dyn_cast<SCEVConstant>(S)) 9523 return isa<UndefValue>(SC->getValue()); 9524 return false; 9525 }); 9526 } 9527 9528 namespace { 9529 // Collect all steps of SCEV expressions. 9530 struct SCEVCollectStrides { 9531 ScalarEvolution &SE; 9532 SmallVectorImpl<const SCEV *> &Strides; 9533 9534 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 9535 : SE(SE), Strides(S) {} 9536 9537 bool follow(const SCEV *S) { 9538 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 9539 Strides.push_back(AR->getStepRecurrence(SE)); 9540 return true; 9541 } 9542 bool isDone() const { return false; } 9543 }; 9544 9545 // Collect all SCEVUnknown and SCEVMulExpr expressions. 
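// (Sign-extend expressions are also collected as terms; see follow() below.)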
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
      : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we have found an AddRec, do not walk the
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};
}

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
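/// For instance (an illustrative SCEV, not from a testcase): in
/// {%a,+,(8 * %m * %o)}<%loop>, the stride 8 * %m * %o is collected under 1),
/// while a product such as %p * %q multiplying an AddRec is collected
/// under 2).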
9648 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 9649 SmallVectorImpl<const SCEV *> &Terms) { 9650 SmallVector<const SCEV *, 4> Strides; 9651 SCEVCollectStrides StrideCollector(*this, Strides); 9652 visitAll(Expr, StrideCollector); 9653 9654 DEBUG({ 9655 dbgs() << "Strides:\n"; 9656 for (const SCEV *S : Strides) 9657 dbgs() << *S << "\n"; 9658 }); 9659 9660 for (const SCEV *S : Strides) { 9661 SCEVCollectTerms TermCollector(Terms); 9662 visitAll(S, TermCollector); 9663 } 9664 9665 DEBUG({ 9666 dbgs() << "Terms:\n"; 9667 for (const SCEV *T : Terms) 9668 dbgs() << *T << "\n"; 9669 }); 9670 9671 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 9672 visitAll(Expr, MulCollector); 9673 } 9674 9675 static bool findArrayDimensionsRec(ScalarEvolution &SE, 9676 SmallVectorImpl<const SCEV *> &Terms, 9677 SmallVectorImpl<const SCEV *> &Sizes) { 9678 int Last = Terms.size() - 1; 9679 const SCEV *Step = Terms[Last]; 9680 9681 // End of recursion. 9682 if (Last == 0) { 9683 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 9684 SmallVector<const SCEV *, 2> Qs; 9685 for (const SCEV *Op : M->operands()) 9686 if (!isa<SCEVConstant>(Op)) 9687 Qs.push_back(Op); 9688 9689 Step = SE.getMulExpr(Qs); 9690 } 9691 9692 Sizes.push_back(Step); 9693 return true; 9694 } 9695 9696 for (const SCEV *&Term : Terms) { 9697 // Normalize the terms before the next call to findArrayDimensionsRec. 9698 const SCEV *Q, *R; 9699 SCEVDivision::divide(SE, Term, Step, &Q, &R); 9700 9701 // Bail out when GCD does not evenly divide one of the terms. 9702 if (!R->isZero()) 9703 return false; 9704 9705 Term = Q; 9706 } 9707 9708 // Remove all SCEVConstants. 9709 Terms.erase( 9710 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 9711 Terms.end()); 9712 9713 if (Terms.size() > 0) 9714 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 9715 return false; 9716 9717 Sizes.push_back(Step); 9718 return true; 9719 } 9720 9721 9722 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 9723 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 9724 for (const SCEV *T : Terms) 9725 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 9726 return true; 9727 return false; 9728 } 9729 9730 // Return the number of product terms in S. 9731 static inline int numberOfTerms(const SCEV *S) { 9732 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 9733 return Expr->getNumOperands(); 9734 return 1; 9735 } 9736 9737 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 9738 if (isa<SCEVConstant>(T)) 9739 return nullptr; 9740 9741 if (isa<SCEVUnknown>(T)) 9742 return T; 9743 9744 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 9745 SmallVector<const SCEV *, 2> Factors; 9746 for (const SCEV *Op : M->operands()) 9747 if (!isa<SCEVConstant>(Op)) 9748 Factors.push_back(Op); 9749 9750 return SE.getMulExpr(Factors); 9751 } 9752 9753 return T; 9754 } 9755 9756 /// Return the size of an element read or written by Inst. 
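/// For example, for a store of an i32 value this returns the SCEV of
/// sizeof(i32), i.e. a constant 4 under a typical data layout (illustrative).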
9757 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 9758 Type *Ty; 9759 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 9760 Ty = Store->getValueOperand()->getType(); 9761 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 9762 Ty = Load->getType(); 9763 else 9764 return nullptr; 9765 9766 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 9767 return getSizeOfExpr(ETy, Ty); 9768 } 9769 9770 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 9771 SmallVectorImpl<const SCEV *> &Sizes, 9772 const SCEV *ElementSize) { 9773 if (Terms.size() < 1 || !ElementSize) 9774 return; 9775 9776 // Early return when Terms do not contain parameters: we do not delinearize 9777 // non parametric SCEVs. 9778 if (!containsParameters(Terms)) 9779 return; 9780 9781 DEBUG({ 9782 dbgs() << "Terms:\n"; 9783 for (const SCEV *T : Terms) 9784 dbgs() << *T << "\n"; 9785 }); 9786 9787 // Remove duplicates. 9788 array_pod_sort(Terms.begin(), Terms.end()); 9789 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 9790 9791 // Put larger terms first. 9792 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 9793 return numberOfTerms(LHS) > numberOfTerms(RHS); 9794 }); 9795 9796 // Try to divide all terms by the element size. If term is not divisible by 9797 // element size, proceed with the original term. 9798 for (const SCEV *&Term : Terms) { 9799 const SCEV *Q, *R; 9800 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 9801 if (!Q->isZero()) 9802 Term = Q; 9803 } 9804 9805 SmallVector<const SCEV *, 4> NewTerms; 9806 9807 // Remove constant factors. 9808 for (const SCEV *T : Terms) 9809 if (const SCEV *NewT = removeConstantFactors(*this, T)) 9810 NewTerms.push_back(NewT); 9811 9812 DEBUG({ 9813 dbgs() << "Terms after sorting:\n"; 9814 for (const SCEV *T : NewTerms) 9815 dbgs() << *T << "\n"; 9816 }); 9817 9818 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 9819 Sizes.clear(); 9820 return; 9821 } 9822 9823 // The last element to be pushed into Sizes is the size of an element. 9824 Sizes.push_back(ElementSize); 9825 9826 DEBUG({ 9827 dbgs() << "Sizes:\n"; 9828 for (const SCEV *S : Sizes) 9829 dbgs() << *S << "\n"; 9830 }); 9831 } 9832 9833 void ScalarEvolution::computeAccessFunctions( 9834 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 9835 SmallVectorImpl<const SCEV *> &Sizes) { 9836 9837 // Early exit in case this SCEV is not an affine multivariate function. 9838 if (Sizes.empty()) 9839 return; 9840 9841 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 9842 if (!AR->isAffine()) 9843 return; 9844 9845 const SCEV *Res = Expr; 9846 int Last = Sizes.size() - 1; 9847 for (int i = Last; i >= 0; i--) { 9848 const SCEV *Q, *R; 9849 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 9850 9851 DEBUG({ 9852 dbgs() << "Res: " << *Res << "\n"; 9853 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 9854 dbgs() << "Res divided by Sizes[i]:\n"; 9855 dbgs() << "Quotient: " << *Q << "\n"; 9856 dbgs() << "Remainder: " << *R << "\n"; 9857 }); 9858 9859 Res = Q; 9860 9861 // Do not record the last subscript corresponding to the size of elements in 9862 // the array. 9863 if (i == Last) { 9864 9865 // Bail out if the remainder is too complex. 9866 if (isa<SCEVAddRecExpr>(R)) { 9867 Subscripts.clear(); 9868 Sizes.clear(); 9869 return; 9870 } 9871 9872 continue; 9873 } 9874 9875 // Record the access function for the current subscript. 
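    // The remainder R of the division of Res by Sizes[i] is the subscript in
    // the i-th dimension.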
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization
/// that is the offset start of the array. The SCEV->delinearize algorithm
/// computes the multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is
/// %A because it appears as an offset that does not divide any of the strides
/// in the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions
/// of the array, as these are the multiples relating successive strides:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of
/// that dimension by dividing the overall size of the array by the size of
/// the known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///   CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.

void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
9959 computeAccessFunctions(Expr, Subscripts, Sizes); 9960 9961 if (Subscripts.empty()) 9962 return; 9963 9964 DEBUG({ 9965 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 9966 dbgs() << "ArrayDecl[UnknownSize]"; 9967 for (const SCEV *S : Sizes) 9968 dbgs() << "[" << *S << "]"; 9969 9970 dbgs() << "\nArrayRef"; 9971 for (const SCEV *S : Subscripts) 9972 dbgs() << "[" << *S << "]"; 9973 dbgs() << "\n"; 9974 }); 9975 } 9976 9977 //===----------------------------------------------------------------------===// 9978 // SCEVCallbackVH Class Implementation 9979 //===----------------------------------------------------------------------===// 9980 9981 void ScalarEvolution::SCEVCallbackVH::deleted() { 9982 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9983 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 9984 SE->ConstantEvolutionLoopExitValue.erase(PN); 9985 SE->eraseValueFromMap(getValPtr()); 9986 // this now dangles! 9987 } 9988 9989 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 9990 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 9991 9992 // Forget all the expressions associated with users of the old value, 9993 // so that future queries will recompute the expressions using the new 9994 // value. 9995 Value *Old = getValPtr(); 9996 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 9997 SmallPtrSet<User *, 8> Visited; 9998 while (!Worklist.empty()) { 9999 User *U = Worklist.pop_back_val(); 10000 // Deleting the Old value will cause this to dangle. Postpone 10001 // that until everything else is done. 10002 if (U == Old) 10003 continue; 10004 if (!Visited.insert(U).second) 10005 continue; 10006 if (PHINode *PN = dyn_cast<PHINode>(U)) 10007 SE->ConstantEvolutionLoopExitValue.erase(PN); 10008 SE->eraseValueFromMap(U); 10009 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 10010 } 10011 // Delete the Old value. 10012 if (PHINode *PN = dyn_cast<PHINode>(Old)) 10013 SE->ConstantEvolutionLoopExitValue.erase(PN); 10014 SE->eraseValueFromMap(Old); 10015 // this now dangles! 10016 } 10017 10018 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 10019 : CallbackVH(V), SE(se) {} 10020 10021 //===----------------------------------------------------------------------===// 10022 // ScalarEvolution Class Implementation 10023 //===----------------------------------------------------------------------===// 10024 10025 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 10026 AssumptionCache &AC, DominatorTree &DT, 10027 LoopInfo &LI) 10028 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 10029 CouldNotCompute(new SCEVCouldNotCompute()), 10030 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10031 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 10032 FirstUnknown(nullptr) { 10033 10034 // To use guards for proving predicates, we need to scan every instruction in 10035 // relevant basic blocks, and not just terminators. Doing this is a waste of 10036 // time if the IR does not actually contain any calls to 10037 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 10038 // 10039 // This pessimizes the case where a pass that preserves ScalarEvolution wants 10040 // to _add_ guards to the module when there weren't any before, and wants 10041 // ScalarEvolution to optimize based on those guards. For now we prefer to be 10042 // efficient in lieu of being smart in that rather obscure case. 
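  // (Concretely, the lookup below finds the declaration of
  // @llvm.experimental.guard in the module, if any, and records whether it
  // has uses.)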
10043 10044 auto *GuardDecl = F.getParent()->getFunction( 10045 Intrinsic::getName(Intrinsic::experimental_guard)); 10046 HasGuards = GuardDecl && !GuardDecl->use_empty(); 10047 } 10048 10049 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 10050 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 10051 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 10052 ValueExprMap(std::move(Arg.ValueExprMap)), 10053 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 10054 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 10055 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 10056 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 10057 PredicatedBackedgeTakenCounts( 10058 std::move(Arg.PredicatedBackedgeTakenCounts)), 10059 ConstantEvolutionLoopExitValue( 10060 std::move(Arg.ConstantEvolutionLoopExitValue)), 10061 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 10062 LoopDispositions(std::move(Arg.LoopDispositions)), 10063 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 10064 BlockDispositions(std::move(Arg.BlockDispositions)), 10065 UnsignedRanges(std::move(Arg.UnsignedRanges)), 10066 SignedRanges(std::move(Arg.SignedRanges)), 10067 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 10068 UniquePreds(std::move(Arg.UniquePreds)), 10069 SCEVAllocator(std::move(Arg.SCEVAllocator)), 10070 FirstUnknown(Arg.FirstUnknown) { 10071 Arg.FirstUnknown = nullptr; 10072 } 10073 10074 ScalarEvolution::~ScalarEvolution() { 10075 // Iterate through all the SCEVUnknown instances and call their 10076 // destructors, so that they release their references to their values. 10077 for (SCEVUnknown *U = FirstUnknown; U;) { 10078 SCEVUnknown *Tmp = U; 10079 U = U->Next; 10080 Tmp->~SCEVUnknown(); 10081 } 10082 FirstUnknown = nullptr; 10083 10084 ExprValueMap.clear(); 10085 ValueExprMap.clear(); 10086 HasRecMap.clear(); 10087 10088 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 10089 // that a loop had multiple computable exits. 10090 for (auto &BTCI : BackedgeTakenCounts) 10091 BTCI.second.clear(); 10092 for (auto &BTCI : PredicatedBackedgeTakenCounts) 10093 BTCI.second.clear(); 10094 10095 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 10096 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 10097 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 10098 } 10099 10100 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 10101 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 10102 } 10103 10104 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 10105 const Loop *L) { 10106 // Print all inner loops first 10107 for (Loop *I : *L) 10108 PrintLoopInfo(OS, SE, I); 10109 10110 OS << "Loop "; 10111 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10112 OS << ": "; 10113 10114 SmallVector<BasicBlock *, 8> ExitBlocks; 10115 L->getExitBlocks(ExitBlocks); 10116 if (ExitBlocks.size() != 1) 10117 OS << "<multiple exits> "; 10118 10119 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10120 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 10121 } else { 10122 OS << "Unpredictable backedge-taken count. 
"; 10123 } 10124 10125 OS << "\n" 10126 "Loop "; 10127 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10128 OS << ": "; 10129 10130 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 10131 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 10132 if (SE->isBackedgeTakenCountMaxOrZero(L)) 10133 OS << ", actual taken count either this or zero."; 10134 } else { 10135 OS << "Unpredictable max backedge-taken count. "; 10136 } 10137 10138 OS << "\n" 10139 "Loop "; 10140 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10141 OS << ": "; 10142 10143 SCEVUnionPredicate Pred; 10144 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 10145 if (!isa<SCEVCouldNotCompute>(PBT)) { 10146 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 10147 OS << " Predicates:\n"; 10148 Pred.print(OS, 4); 10149 } else { 10150 OS << "Unpredictable predicated backedge-taken count. "; 10151 } 10152 OS << "\n"; 10153 10154 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 10155 OS << "Loop "; 10156 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10157 OS << ": "; 10158 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 10159 } 10160 } 10161 10162 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 10163 switch (LD) { 10164 case ScalarEvolution::LoopVariant: 10165 return "Variant"; 10166 case ScalarEvolution::LoopInvariant: 10167 return "Invariant"; 10168 case ScalarEvolution::LoopComputable: 10169 return "Computable"; 10170 } 10171 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 10172 } 10173 10174 void ScalarEvolution::print(raw_ostream &OS) const { 10175 // ScalarEvolution's implementation of the print method is to print 10176 // out SCEV values of all instructions that are interesting. Doing 10177 // this potentially causes it to create new SCEV objects though, 10178 // which technically conflicts with the const qualifier. This isn't 10179 // observable from outside the class though, so casting away the 10180 // const isn't dangerous. 
10181 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 10182 10183 OS << "Classifying expressions for: "; 10184 F.printAsOperand(OS, /*PrintType=*/false); 10185 OS << "\n"; 10186 for (Instruction &I : instructions(F)) 10187 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 10188 OS << I << '\n'; 10189 OS << " --> "; 10190 const SCEV *SV = SE.getSCEV(&I); 10191 SV->print(OS); 10192 if (!isa<SCEVCouldNotCompute>(SV)) { 10193 OS << " U: "; 10194 SE.getUnsignedRange(SV).print(OS); 10195 OS << " S: "; 10196 SE.getSignedRange(SV).print(OS); 10197 } 10198 10199 const Loop *L = LI.getLoopFor(I.getParent()); 10200 10201 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 10202 if (AtUse != SV) { 10203 OS << " --> "; 10204 AtUse->print(OS); 10205 if (!isa<SCEVCouldNotCompute>(AtUse)) { 10206 OS << " U: "; 10207 SE.getUnsignedRange(AtUse).print(OS); 10208 OS << " S: "; 10209 SE.getSignedRange(AtUse).print(OS); 10210 } 10211 } 10212 10213 if (L) { 10214 OS << "\t\t" "Exits: "; 10215 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 10216 if (!SE.isLoopInvariant(ExitValue, L)) { 10217 OS << "<<Unknown>>"; 10218 } else { 10219 OS << *ExitValue; 10220 } 10221 10222 bool First = true; 10223 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 10224 if (First) { 10225 OS << "\t\t" "LoopDispositions: { "; 10226 First = false; 10227 } else { 10228 OS << ", "; 10229 } 10230 10231 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10232 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 10233 } 10234 10235 for (auto *InnerL : depth_first(L)) { 10236 if (InnerL == L) 10237 continue; 10238 if (First) { 10239 OS << "\t\t" "LoopDispositions: { "; 10240 First = false; 10241 } else { 10242 OS << ", "; 10243 } 10244 10245 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 10246 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 10247 } 10248 10249 OS << " }"; 10250 } 10251 10252 OS << "\n"; 10253 } 10254 10255 OS << "Determining loop execution counts for: "; 10256 F.printAsOperand(OS, /*PrintType=*/false); 10257 OS << "\n"; 10258 for (Loop *I : LI) 10259 PrintLoopInfo(OS, &SE, I); 10260 } 10261 10262 ScalarEvolution::LoopDisposition 10263 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 10264 auto &Values = LoopDispositions[S]; 10265 for (auto &V : Values) { 10266 if (V.getPointer() == L) 10267 return V.getInt(); 10268 } 10269 Values.emplace_back(L, LoopVariant); 10270 LoopDisposition D = computeLoopDisposition(S, L); 10271 auto &Values2 = LoopDispositions[S]; 10272 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 10273 if (V.getPointer() == L) { 10274 V.setInt(D); 10275 break; 10276 } 10277 } 10278 return D; 10279 } 10280 10281 ScalarEvolution::LoopDisposition 10282 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 10283 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 10284 case scConstant: 10285 return LoopInvariant; 10286 case scTruncate: 10287 case scZeroExtend: 10288 case scSignExtend: 10289 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 10290 case scAddRecExpr: { 10291 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 10292 10293 // If L is the addrec's loop, it's computable. 10294 if (AR->getLoop() == L) 10295 return LoopComputable; 10296 10297 // Add recurrences are never invariant in the function-body (null loop). 10298 if (!L) 10299 return LoopVariant; 10300 10301 // This recurrence is variant w.r.t. L if L contains AR's loop. 
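    // (E.g., if AR is {0,+,1}<%inner> and L is an enclosing outer loop, the
    // recurrence takes different values while %inner iterates inside a single
    // visit of L's body, so it is not invariant in L.)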
10302 if (L->contains(AR->getLoop())) 10303 return LoopVariant; 10304 10305 // This recurrence is invariant w.r.t. L if AR's loop contains L. 10306 if (AR->getLoop()->contains(L)) 10307 return LoopInvariant; 10308 10309 // This recurrence is variant w.r.t. L if any of its operands 10310 // are variant. 10311 for (auto *Op : AR->operands()) 10312 if (!isLoopInvariant(Op, L)) 10313 return LoopVariant; 10314 10315 // Otherwise it's loop-invariant. 10316 return LoopInvariant; 10317 } 10318 case scAddExpr: 10319 case scMulExpr: 10320 case scUMaxExpr: 10321 case scSMaxExpr: { 10322 bool HasVarying = false; 10323 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 10324 LoopDisposition D = getLoopDisposition(Op, L); 10325 if (D == LoopVariant) 10326 return LoopVariant; 10327 if (D == LoopComputable) 10328 HasVarying = true; 10329 } 10330 return HasVarying ? LoopComputable : LoopInvariant; 10331 } 10332 case scUDivExpr: { 10333 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 10334 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 10335 if (LD == LoopVariant) 10336 return LoopVariant; 10337 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 10338 if (RD == LoopVariant) 10339 return LoopVariant; 10340 return (LD == LoopInvariant && RD == LoopInvariant) ? 10341 LoopInvariant : LoopComputable; 10342 } 10343 case scUnknown: 10344 // All non-instruction values are loop invariant. All instructions are loop 10345 // invariant if they are not contained in the specified loop. 10346 // Instructions are never considered invariant in the function body 10347 // (null loop) because they are defined within the "loop". 10348 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 10349 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 10350 return LoopInvariant; 10351 case scCouldNotCompute: 10352 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 10353 } 10354 llvm_unreachable("Unknown SCEV kind!"); 10355 } 10356 10357 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 10358 return getLoopDisposition(S, L) == LoopInvariant; 10359 } 10360 10361 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 10362 return getLoopDisposition(S, L) == LoopComputable; 10363 } 10364 10365 ScalarEvolution::BlockDisposition 10366 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 10367 auto &Values = BlockDispositions[S]; 10368 for (auto &V : Values) { 10369 if (V.getPointer() == BB) 10370 return V.getInt(); 10371 } 10372 Values.emplace_back(BB, DoesNotDominateBlock); 10373 BlockDisposition D = computeBlockDisposition(S, BB); 10374 auto &Values2 = BlockDispositions[S]; 10375 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 10376 if (V.getPointer() == BB) { 10377 V.setInt(D); 10378 break; 10379 } 10380 } 10381 return D; 10382 } 10383 10384 ScalarEvolution::BlockDisposition 10385 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 10386 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 10387 case scConstant: 10388 return ProperlyDominatesBlock; 10389 case scTruncate: 10390 case scZeroExtend: 10391 case scSignExtend: 10392 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 10393 case scAddRecExpr: { 10394 // This uses a "dominates" query instead of "properly dominates" query 10395 // to test for proper dominance too, because the instruction which 10396 // produces the addrec's value is a PHI, and a PHI effectively properly 10397 // dominates 
its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
        ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
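  // Constants and unknowns are re-created through the target ScalarEvolution
  // instance so that the two "universes" never share uniqued SCEV nodes.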
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }
    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a
      // loop go from "undef" to "undef+1" (say). The transform is fine, since
      // in both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
10562 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 10563 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 10564 Inv.invalidate<AssumptionAnalysis>(F, PA) || 10565 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 10566 Inv.invalidate<LoopAnalysis>(F, PA); 10567 } 10568 10569 AnalysisKey ScalarEvolutionAnalysis::Key; 10570 10571 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 10572 FunctionAnalysisManager &AM) { 10573 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 10574 AM.getResult<AssumptionAnalysis>(F), 10575 AM.getResult<DominatorTreeAnalysis>(F), 10576 AM.getResult<LoopAnalysis>(F)); 10577 } 10578 10579 PreservedAnalyses 10580 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 10581 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 10582 return PreservedAnalyses::all(); 10583 } 10584 10585 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 10586 "Scalar Evolution Analysis", false, true) 10587 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10588 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 10589 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 10590 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 10591 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 10592 "Scalar Evolution Analysis", false, true) 10593 char ScalarEvolutionWrapperPass::ID = 0; 10594 10595 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 10596 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 10597 } 10598 10599 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 10600 SE.reset(new ScalarEvolution( 10601 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 10602 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 10603 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 10604 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 10605 return false; 10606 } 10607 10608 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 10609 10610 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 10611 SE->print(OS); 10612 } 10613 10614 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 10615 if (!VerifySCEV) 10616 return; 10617 10618 SE->verify(); 10619 } 10620 10621 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 10622 AU.setPreservesAll(); 10623 AU.addRequiredTransitive<AssumptionCacheTracker>(); 10624 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 10625 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 10626 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 10627 } 10628 10629 const SCEVPredicate * 10630 ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS, 10631 const SCEVConstant *RHS) { 10632 FoldingSetNodeID ID; 10633 // Unique this node based on the arguments 10634 ID.AddInteger(SCEVPredicate::P_Equal); 10635 ID.AddPointer(LHS); 10636 ID.AddPointer(RHS); 10637 void *IP = nullptr; 10638 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10639 return S; 10640 SCEVEqualPredicate *Eq = new (SCEVAllocator) 10641 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 10642 UniquePreds.InsertNode(Eq, IP); 10643 return Eq; 10644 } 10645 10646 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 10647 const SCEVAddRecExpr *AR, 10648 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10649 FoldingSetNodeID ID; 10650 // Unique this node based on the arguments 10651 ID.AddInteger(SCEVPredicate::P_Wrap); 10652 
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, the rewrite is free to add further predicates
  /// to \p NewPreds so that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }

    return Expr;
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    if (!NewPreds) {
      // Check if we've already made this assumption.
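      // With no set to collect new assumptions into, the rewrite may rely
      // only on predicates the caller has already committed to.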
      return Pred && Pred->implies(A);
    }
    NewPreds->insert(A);
    return true;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};
} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEVUnknown *LHS,
                                       const SCEVConstant *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
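  // (An NSW recurrence cannot signed-wrap on any increment, which is exactly
  // what IncrementNSSW would otherwise have to check at run time.)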
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for them.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an associated "
                "expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
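  // Starting from the stale rewrite is sound: predicates are only ever added
  // to the union, never removed, so an earlier result can only be refined
  // further.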
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
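      // An entry whose rewritten form is identical to the original expression
      // carries no information.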
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
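
// A minimal sketch of how a client transform might drive the predicated
// interface above. `L` and `IndVar` are assumed to be supplied by the caller;
// this is illustrative only, not code taken from an in-tree pass.
//
// \code
//   PredicatedScalarEvolution PSE(SE, *L);
//
//   // Try to view the value as an add recurrence, letting PSE collect any
//   // run-time assumptions (e.g. no-overflow predicates) needed to do so.
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(IndVar)) {
//     // Everything assumed so far; a versioning transform would emit
//     // run-time checks for these predicates before relying on AR.
//     const SCEVUnionPredicate &Assumed = PSE.getUnionPredicate();
//     if (Assumed.isAlwaysTrue()) {
//       // No checks required; AR holds unconditionally.
//     }
//   }
// \endcode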