//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
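// Because SCEVs are uniqued, structurally identical expressions are the same
// object. A sketch of hypothetical client code (assuming a ScalarEvolution
// instance SE and two already-analyzed integer Values X and Y):
//
//   const SCEV *A = SE.getAddExpr(SE.getSCEV(X), SE.getSCEV(Y));
//   const SCEV *B = SE.getAddExpr(SE.getSCEV(Y), SE.getSCEV(X));
//   assert(A == B && "operands are canonically ordered; nodes are uniqued");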
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
VerifySCEVMap("verify-scev-maps",
              cl::desc("Verify no dangling value in ScalarEvolution's "
                       "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));
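// The knobs above are ordinary cl::opt flags. As a usage sketch (assuming an
// opt binary built from this tree), they can be set on the command line, e.g.:
//
//   opt -analyze -scalar-evolution -scalar-evolution-max-iterations=200 in.ll
//
// or forwarded through the clang driver with -mllvm.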
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
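// For orientation, a few of the textual forms print() below produces
// (illustrative examples; the exact spelling is defined by the code itself):
//
//   42                      a SCEVConstant
//   (zext i8 %x to i32)     a SCEVZeroExtendExpr
//   (%a + %b)<nuw>          a SCEVAddExpr known not to wrap unsigned
//   {0,+,4}<nuw><%loop>     an affine SCEVAddRecExpr over loop %loop
//   (%a /u %b)              a SCEVUDivExpr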
void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
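// Every SCEV node is uniqued through the UniqueSCEVs folding set. The pattern
// below (build a FoldingSetNodeID, probe with FindNodeOrInsertPos, and only
// allocate a new node on a miss) recurs throughout this file and is what
// makes pointer equality a valid equality test for SCEVs.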
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which point to this
  // SCEVUnknown.
  setValPtr(New);
}
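// The next three predicates recognize the constant expressions that
// target-independent IR conventionally uses to spell sizeof, alignof and
// offsetof. For example (illustrative), sizeof(double) is the offset of
// element 1 from a null base pointer:
//
//   ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)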
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(SmallSet<std::pair<Value *, Value *>, 8> &EqCache,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCache.count({LV, RV}))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCache, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCache.insert({LV, RV});
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to
// be more efficient.
static int CompareSCEVComplexity(
    SmallSet<std::pair<const SCEV *, const SCEV *>, 8> &EqCacheSCEV,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.count({LHS, RHS}))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    SmallSet<std::pair<Value *, Value *>, 8> EqCache;
    int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(),
                                   Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between the loop headers of
    // two add recurrences that appear in a single SCEV expression, so we can
    // safely sort recurrences by loop-header dominance. We require such an
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LA->getOperand(i),
                                    RA->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      if (i >= RNumOps)
        return 1;
      int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(i),
                                    RC->getOperand(i), DT, Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.insert({LHS, RHS});
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(),
                                  DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getRHS(), RC->getRHS(), DT,
                              Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(),
                                  RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.insert({LHS, RHS});
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
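// For illustration (using the print syntax above): an operand list such as
// (%x, 4, %x) is ordered so the constant sorts first, since scConstant is the
// smallest SCEVType, and the grouping pass in GroupByComplexity below then
// makes the two %x's adjacent, giving (4, %x, %x). Downstream folders rely on
// this to find identical operands with a single linear scan.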
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  SmallSet<std::pair<const SCEV *, const SCEV *>, 8> EqCache;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCache, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&EqCache, LI, &DT](const SCEV *LHS, const SCEV *RHS) {
                     return
                         CompareSCEVComplexity(EqCache, LI, LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) {  // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;  // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}
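// A usage sketch for the divider defined below (hypothetical client code):
//
//   const SCEV *Q, *R;
//   SCEVDivision::divide(SE, NumeratorSCEV, DenominatorSCEV, &Q, &R);
//
// For Numerator = (8 + 4 * %n) and Denominator = 4 this yields Q = (2 + %n)
// and R = 0; for Numerator = (9 + 4 * %n) it yields Q = (2 + %n) and R = 1.
// When the division cannot be performed, the convention is Q = 0 and
// R = Numerator.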
namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following kinds of expressions, so the
  // corresponding visitors are intentionally left empty.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                   Simple SCEV method implementations
//===----------------------------------------------------------------------===//
/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
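  // A worked example of the scheme above (a sketch of the math only): for
  // K = 3 and W = 32, K! = 6 = 2^1 * 3, so T = 1 and K! / 2^T = 3. The
  // product It * (It - 1) * (It - 2) is computed at W + T = 33 bits, shifted
  // right by T = 1 (always exact), truncated to 32 bits, and multiplied by
  // the multiplicative inverse of 3 modulo 2^32, namely 0xAAAAAAAB (since
  // 3 * 0xAAAAAAAB == 2^33 + 1), which performs the exact division by 3.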
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying so that the computation of K! / 2^T cannot overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
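// For instance (illustrative): the affine recurrence {A,+,B} evaluates at
// iteration It to A + B*BC(It, 1) = A + B*It, and {0,+,1,+,1} evaluates to
// It + It*(It - 1)/2, i.e. 0, 1, 3, 6, 10, ... for It = 0, 1, 2, 3, 4.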
//===----------------------------------------------------------------------===//
//                 SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}
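// For example (illustrative): for an 8-bit recurrence whose Step is known to
// lie in [1, 3], the limit above is SINT_MIN - 3, i.e. 125 in wrapping 8-bit
// arithmetic, with predicate s<. A value of 124 can still take a step of 3
// to reach 127 without signed overflow, whereas 125 + 3 would wrap to -128.
// The unsigned variant below is analogous.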
// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
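// For instance (illustrative): given AR = {%n + 4,+,4}, whose start already
// contains one copy of the step, PreStart is %n. If {%n,+,4} is known <nuw>
// (for the zero-extend case) and the backedge is taken at least once, the
// extended start can be computed as (zext(4) + zext(%n)) rather than the
// opaque zext(%n + 4), keeping the extension distributed over the addition.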
// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
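      // In essence, the check below computes Start + Step * MaxBECount in a
      // type twice as wide and compares it against the same sum formed from
      // the individually zero-extended operands; if the two agree, no
      // iteration can have wrapped. E.g. (illustrative) an 8-bit {0,+,1}
      // with MaxBECount = 100 stays within [0, 100], and the 16-bit
      // comparison proves it.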
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
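      //
      // Illustrative example of the guard-based reasoning below: in
      //   for (unsigned char i = 0; i < 100; ++i) { int Y = i; }
      // the backedge is only taken while `i ult 100`, which keeps {0,+,1}
      // below the no-overflow limit N computed for a step of 1, so the
      // recurrence can be marked <nuw> and the zext folded into its
      // operands.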
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not overflow in the unsigned sense then we can,
      // by definition, commute the zero extension with the addition
      // operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
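  // (The recursive simplification calls above may have inserted new nodes
  // into UniqueSCEVs, which can stale the insert position captured by the
  // earlier lookup.)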
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxExtDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->getNumOperands() == 2) {
      auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
      auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
      if (SMul && SC1) {
        if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
          const APInt &C1 = SC1->getAPInt();
          const APInt &C2 = SC2->getAPInt();
          if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
              C2.ugt(C1) && C2.isPowerOf2())
            return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
                              getSignExtendExpr(SMul, Ty, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
        }
      }
    }

    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not overflow in the signed sense then we can,
      // by definition, commute the sign extension with the addition
      // operation.
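      // For instance (illustrative): sext((x + 1)<nsw>) from i32 to i64
      // becomes (sext(x) + 1), which often folds further.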
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
                                          OverflowLimit)))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                       Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // If Start and Step are constants, check if we can apply this
      // transformation:
      // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
      auto *SC1 = dyn_cast<SCEVConstant>(Start);
      auto *SC2 = dyn_cast<SCEVConstant>(Step);
      if (SC1 && SC2) {
        const APInt &C1 = SC1->getAPInt();
        const APInt &C2 = SC2->getAPInt();
        if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
            C2.isPowerOf2()) {
          Start = getSignExtendExpr(Start, Ty, Depth + 1);
          const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
                                            AR->getNoWrapFlags());
          return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably non-negative and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
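  // E.g. (illustrative): anyext({x,+,1}) to a wider type becomes
  // {anyext(x),+,anyext(1)}<nw>, leaving each operand free to later fold as
  // a zext or a sext, whichever simplifies.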
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(),
                                         Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const SmallVectorImpl<const SCEV *> &Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
      Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {

    // (A + C) --> (A + C)<nsw> if the addition does not signed-overflow
    // (A + C) --> (A + C)<nuw> if the addition does not unsigned-overflow

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::Add, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  if (!isLoopInvariant(S, L))
    return false;
  // If a value depends on a SCEVUnknown which is defined after the loop, we
  // conservatively assume that we cannot calculate it at the loop's entry.
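  // For example (illustrative): a value computed in the loop's exit block is
  // loop-invariant with respect to L, yet it is unavailable when execution
  // first enters L.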
  struct FindDominatedSCEVUnknown {
    bool Found = false;
    const Loop *L;
    DominatorTree &DT;
    LoopInfo &LI;

    FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
        : L(L), DT(DT), LI(LI) {}

    bool checkSCEVUnknown(const SCEVUnknown *SU) {
      if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
        if (DT.dominates(L->getHeader(), I->getParent()))
          Found = true;
        else
          assert(DT.dominates(I->getParent(), L->getHeader()) &&
                 "No dominance relationship between SCEV and loop?");
      }
      return false;
    }

    bool follow(const SCEV *S) {
      switch (static_cast<SCEVTypes>(S->getSCEVType())) {
      case scConstant:
        return false;
      case scAddRecExpr:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUDivExpr:
        return true;
      case scUnknown:
        return checkSCEVUnknown(cast<SCEVUnknown>(S));
      case scCouldNotCompute:
        llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
      }
      return false;
    }

    bool isDone() { return Found; }
  };

  FindDominatedSCEVUnknown FSU(L, DT, LI);
  SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
  ST.visitAll(S);
  return !FSU.Found;
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit the recursion depth.
  if (Depth > MaxArithDepth)
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {  // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap,
                                   Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    Type *DstType = Trunc->getType();
    Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
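    // E.g. (illustrative): ((x + y) + z) + w is flattened to
    // (x + y + z + w) here and re-canonicalized by the recursive call.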
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile
      // to re-generate the operands list. Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap,
                                          Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1,
                            OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add
      // recurrence will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together. If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At iteration i we multiply by the i-th factor of the numerator, n-(i-1),
  // and then divide by i. Each such division is exact, because the running
  // product of the first i numerator factors is divisible by i!. This keeps
  // the intermediate values small and helps reduce the chance of overflow in
  // the intermediate computations. However, we can still overflow even when
  // the final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
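/// E.g. (illustrative): 2 * (3 + x) distributes to 6 + 2 * x because a
/// constant is visible in the add/mul chain, and (-1) * {a,+,s} distributes
/// over the recurrence operands to give {-a,+,-s}.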
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit the recursion depth.
  if (Depth > MaxArithDepth)
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant,
        // apply this transformation as well.
        if (Add->getNumOperands() == 2)
          // TODO: There are some cases where this transformation is not
          // profitable, for example:
          //   Add = (C0 + X) * Y + Z.
          // Maybe the scope of this transformation should be narrowed down.
          if (containsConstantInAddMulChain(Add))
            return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                         SCEV::FlagAnyWrap, Depth + 1),
                              getMulExpr(LHSC, Add->getOperand(1),
                                         SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
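          // E.g. (illustrative): (-1) * {0,+,1}<nw> becomes {0,+,-1}<nw>.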
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // The no-self-wrap flag (NW) cannot be guaranteed after changing the
      // step size, but it will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize)
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        const SCEV *Term = getZero(Ty);
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
                                               SCEV::FlagAnyWrap, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
          }
        }
        AddRecOps.push_back(Term);
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
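/// E.g. (illustrative): x urem 1 folds to 0, and x urem 8 folds to
/// zext(trunc(x) to i3) extended back to x's type; everything else falls
/// back on the subtraction identity implemented below.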
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back on the identity:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of
      // its operands.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          /// Get a canonical UDivExpr for a recurrence.
          /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
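          // E.g. (illustrative): {5,+,4}/8 is rewritten as {4,+,4}/8; both
          // recurrences produce the same sequence of quotients.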
3058 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3059 if (StartC && !DivInt.urem(StepInt) && 3060 getZeroExtendExpr(AR, ExtTy) == 3061 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3062 getZeroExtendExpr(Step, ExtTy), 3063 AR->getLoop(), SCEV::FlagAnyWrap)) { 3064 const APInt &StartInt = StartC->getAPInt(); 3065 const APInt &StartRem = StartInt.urem(StepInt); 3066 if (StartRem != 0) 3067 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3068 AR->getLoop(), SCEV::FlagNW); 3069 } 3070 } 3071 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3072 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3073 SmallVector<const SCEV *, 4> Operands; 3074 for (const SCEV *Op : M->operands()) 3075 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3076 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3077 // Find an operand that's safely divisible. 3078 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3079 const SCEV *Op = M->getOperand(i); 3080 const SCEV *Div = getUDivExpr(Op, RHSC); 3081 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3082 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3083 M->op_end()); 3084 Operands[i] = Div; 3085 return getMulExpr(Operands); 3086 } 3087 } 3088 } 3089 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3090 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3091 SmallVector<const SCEV *, 4> Operands; 3092 for (const SCEV *Op : A->operands()) 3093 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3094 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3095 Operands.clear(); 3096 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3097 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3098 if (isa<SCEVUDivExpr>(Op) || 3099 getMulExpr(Op, RHS) != A->getOperand(i)) 3100 break; 3101 Operands.push_back(Op); 3102 } 3103 if (Operands.size() == A->getNumOperands()) 3104 return getAddExpr(Operands); 3105 } 3106 } 3107 3108 // Fold if both operands are constant. 3109 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3110 Constant *LHSCV = LHSC->getValue(); 3111 Constant *RHSCV = RHSC->getValue(); 3112 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3113 RHSCV))); 3114 } 3115 } 3116 } 3117 3118 FoldingSetNodeID ID; 3119 ID.AddInteger(scUDivExpr); 3120 ID.AddPointer(LHS); 3121 ID.AddPointer(RHS); 3122 void *IP = nullptr; 3123 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3124 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3125 LHS, RHS); 3126 UniqueSCEVs.InsertNode(S, IP); 3127 return S; 3128 } 3129 3130 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3131 APInt A = C1->getAPInt().abs(); 3132 APInt B = C2->getAPInt().abs(); 3133 uint32_t ABW = A.getBitWidth(); 3134 uint32_t BBW = B.getBitWidth(); 3135 3136 if (ABW > BBW) 3137 B = B.zext(ABW); 3138 else if (ABW < BBW) 3139 A = A.zext(BBW); 3140 3141 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3142 } 3143 3144 /// Get a canonical unsigned division expression, or something simpler if 3145 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3146 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3147 /// it's not exact because the udiv may be clearing bits. 
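/// For example (illustrative): (8 * %x)<nuw> /u 4 simplifies here to 2 * %x,
/// by cancelling the common factor 4; a non-exact udiv could not be
/// simplified this way, since it may be clearing low bits.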
3148 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3149 const SCEV *RHS) { 3150 // TODO: we could try to find factors in all sorts of things, but for now we 3151 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3152 // end of this file for inspiration. 3153 3154 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3155 if (!Mul || !Mul->hasNoUnsignedWrap()) 3156 return getUDivExpr(LHS, RHS); 3157 3158 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3159 // If the mulexpr multiplies by a constant, then that constant must be the 3160 // first element of the mulexpr. 3161 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3162 if (LHSCst == RHSCst) { 3163 SmallVector<const SCEV *, 2> Operands; 3164 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3165 return getMulExpr(Operands); 3166 } 3167 3168 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3169 // that there's a factor provided by one of the other terms. We need to 3170 // check. 3171 APInt Factor = gcd(LHSCst, RHSCst); 3172 if (!Factor.isIntN(1)) { 3173 LHSCst = 3174 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3175 RHSCst = 3176 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3177 SmallVector<const SCEV *, 2> Operands; 3178 Operands.push_back(LHSCst); 3179 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3180 LHS = getMulExpr(Operands); 3181 RHS = RHSCst; 3182 Mul = dyn_cast<SCEVMulExpr>(LHS); 3183 if (!Mul) 3184 return getUDivExactExpr(LHS, RHS); 3185 } 3186 } 3187 } 3188 3189 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3190 if (Mul->getOperand(i) == RHS) { 3191 SmallVector<const SCEV *, 2> Operands; 3192 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3193 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3194 return getMulExpr(Operands); 3195 } 3196 } 3197 3198 return getUDivExpr(LHS, RHS); 3199 } 3200 3201 /// Get an add recurrence expression for the specified loop. Simplify the 3202 /// expression as much as possible. 3203 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3204 const Loop *L, 3205 SCEV::NoWrapFlags Flags) { 3206 SmallVector<const SCEV *, 4> Operands; 3207 Operands.push_back(Start); 3208 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3209 if (StepChrec->getLoop() == L) { 3210 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3211 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3212 } 3213 3214 Operands.push_back(Step); 3215 return getAddRecExpr(Operands, L, Flags); 3216 } 3217 3218 /// Get an add recurrence expression for the specified loop. Simplify the 3219 /// expression as much as possible. 
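/// For example (illustrative): operands {X, 0} for loop L reduce to plain X,
/// since {X,+,0}<L> takes the value X on every iteration.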
3220 const SCEV *
3221 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3222                                const Loop *L, SCEV::NoWrapFlags Flags) {
3223   if (Operands.size() == 1) return Operands[0];
3224 #ifndef NDEBUG
3225   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3226   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3227     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3228            "SCEVAddRecExpr operand types don't match!");
3229   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3230     assert(isLoopInvariant(Operands[i], L) &&
3231            "SCEVAddRecExpr operand is not loop-invariant!");
3232 #endif
3233
3234   if (Operands.back()->isZero()) {
3235     Operands.pop_back();
3236     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3237   }
3238
3239   // It's tempting to call getMaxBackedgeTakenCount here and use that
3240   // information to infer NUW and NSW flags. However, computing a
3241   // BE count requires calling getAddRecExpr, so we may not yet have a
3242   // meaningful BE count at this point (and if we don't, we'd be stuck
3243   // with a SCEVCouldNotCompute as the cached BE count).
3244
3245   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3246
3247   // Canonicalize nested AddRecs by nesting them in order of loop depth.
3248   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3249     const Loop *NestedLoop = NestedAR->getLoop();
3250     if (L->contains(NestedLoop)
3251             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3252             : (!NestedLoop->contains(L) &&
3253                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3254       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
3255                                                   NestedAR->op_end());
3256       Operands[0] = NestedAR->getStart();
3257       // AddRecs require their operands be loop-invariant with respect to their
3258       // loops. Don't perform this transformation if it would break this
3259       // requirement.
3260       bool AllInvariant = all_of(
3261           Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3262
3263       if (AllInvariant) {
3264         // Create a recurrence for the outer loop with the same step size.
3265         //
3266         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3267         // inner recurrence has the same property.
3268         SCEV::NoWrapFlags OuterFlags =
3269             maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3270
3271         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3272         AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3273           return isLoopInvariant(Op, NestedLoop);
3274         });
3275
3276         if (AllInvariant) {
3277           // Ok, both add recurrences are valid after the transformation.
3278           //
3279           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3280           // the outer recurrence has the same property.
3281           SCEV::NoWrapFlags InnerFlags =
3282               maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3283           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3284         }
3285       }
3286       // Reset Operands to its original state.
3287       Operands[0] = NestedAR;
3288     }
3289   }
3290
3291   // Okay, it looks like we really DO need an addrec expr. Check to see if we
3292   // already have one, otherwise create a new one.
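  // (Illustrative: if {0,+,1}<L> was built earlier, the FoldingSet lookup
  // below returns that same interned node instead of allocating a duplicate.)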
3293 FoldingSetNodeID ID; 3294 ID.AddInteger(scAddRecExpr); 3295 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3296 ID.AddPointer(Operands[i]); 3297 ID.AddPointer(L); 3298 void *IP = nullptr; 3299 SCEVAddRecExpr *S = 3300 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3301 if (!S) { 3302 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3303 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3304 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3305 O, Operands.size(), L); 3306 UniqueSCEVs.InsertNode(S, IP); 3307 } 3308 S->setNoWrapFlags(Flags); 3309 return S; 3310 } 3311 3312 const SCEV * 3313 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3314 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3315 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3316 // getSCEV(Base)->getType() has the same address space as Base->getType() 3317 // because SCEV::getType() preserves the address space. 3318 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3319 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3320 // instruction to its SCEV, because the Instruction may be guarded by control 3321 // flow and the no-overflow bits may not be valid for the expression in any 3322 // context. This can be fixed similarly to how these flags are handled for 3323 // adds. 3324 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3325 : SCEV::FlagAnyWrap; 3326 3327 const SCEV *TotalOffset = getZero(IntPtrTy); 3328 // The array size is unimportant. The first thing we do on CurTy is getting 3329 // its element type. 3330 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3331 for (const SCEV *IndexExpr : IndexExprs) { 3332 // Compute the (potentially symbolic) offset in bytes for this index. 3333 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3334 // For a struct, add the member offset. 3335 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3336 unsigned FieldNo = Index->getZExtValue(); 3337 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3338 3339 // Add the field offset to the running total offset. 3340 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3341 3342 // Update CurTy to the type of the field at Index. 3343 CurTy = STy->getTypeAtIndex(Index); 3344 } else { 3345 // Update CurTy to its element type. 3346 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3347 // For an array, add the element offset, explicitly scaled. 3348 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3349 // Getelementptr indices are signed. 3350 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3351 3352 // Multiply the index by the element size to compute the element offset. 3353 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3354 3355 // Add the element offset to the running total offset. 3356 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3357 } 3358 } 3359 3360 // Add the total offset from all the GEP indices to the base. 
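  // e.g. (illustrative): for getelementptr [10 x i32], [10 x i32]* %p, i64 %i,
  // i64 2, the loop above accumulates TotalOffset = (40 * %i) + 8 (with 4-byte
  // i32), so the result below is %p + (40 * %i) + 8.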
3361   return getAddExpr(BaseExpr, TotalOffset, Wrap);
3362 }
3363
3364 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
3365                                          const SCEV *RHS) {
3366   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3367   return getSMaxExpr(Ops);
3368 }
3369
3370 const SCEV *
3371 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3372   assert(!Ops.empty() && "Cannot get empty smax!");
3373   if (Ops.size() == 1) return Ops[0];
3374 #ifndef NDEBUG
3375   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3376   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3377     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3378            "SCEVSMaxExpr operand types don't match!");
3379 #endif
3380
3381   // Sort by complexity; this groups all similar expression types together.
3382   GroupByComplexity(Ops, &LI, DT);
3383
3384   // If there are any constants, fold them together.
3385   unsigned Idx = 0;
3386   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3387     ++Idx;
3388     assert(Idx < Ops.size());
3389     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3390       // We found two constants, fold them together!
3391       ConstantInt *Fold = ConstantInt::get(
3392           getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
3393       Ops[0] = getConstant(Fold);
3394       Ops.erase(Ops.begin()+1);  // Erase the folded element
3395       if (Ops.size() == 1) return Ops[0];
3396       LHSC = cast<SCEVConstant>(Ops[0]);
3397     }
3398
3399     // If we are left with a constant minimum-int, strip it off.
3400     if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
3401       Ops.erase(Ops.begin());
3402       --Idx;
3403     } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
3404       // If we have an smax with a constant maximum-int, it will always be
3405       // maximum-int.
3406       return Ops[0];
3407     }
3408
3409     if (Ops.size() == 1) return Ops[0];
3410   }
3411
3412   // Find the first SMax.
3413   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
3414     ++Idx;
3415
3416   // Check to see if one of the operands is an SMax. If so, expand its operands
3417   // onto our operand list, and recurse to simplify.
3418   if (Idx < Ops.size()) {
3419     bool DeletedSMax = false;
3420     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
3421       Ops.erase(Ops.begin()+Idx);
3422       Ops.append(SMax->op_begin(), SMax->op_end());
3423       DeletedSMax = true;
3424     }
3425
3426     if (DeletedSMax)
3427       return getSMaxExpr(Ops);
3428   }
3429
3430   // Okay, check to see if the same value occurs in the operand list twice. If
3431   // so, delete one. Since we sorted the list, these values are required to
3432   // be adjacent.
3433   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3434     // X smax Y smax Y  -->  X smax Y
3435     // X smax Y         -->  X, if X is known to be greater than or equal to Y
3436     if (Ops[i] == Ops[i+1] ||
3437         isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
3438       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3439       --i; --e;
3440     } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
3441       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3442       --i; --e;
3443     }
3444
3445   if (Ops.size() == 1) return Ops[0];
3446
3447   assert(!Ops.empty() && "Reduced smax down to nothing!");
3448
3449   // Okay, it looks like we really DO need an smax expr. Check to see if we
3450   // already have one, otherwise create a new one.
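  // (Illustrative: an input like smax(smax(%a, 3), 5, %a) has by this point
  // been flattened, constant-folded, and deduplicated to smax(%a, 5).)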
3451 FoldingSetNodeID ID; 3452 ID.AddInteger(scSMaxExpr); 3453 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3454 ID.AddPointer(Ops[i]); 3455 void *IP = nullptr; 3456 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3457 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3458 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3459 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3460 O, Ops.size()); 3461 UniqueSCEVs.InsertNode(S, IP); 3462 return S; 3463 } 3464 3465 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3466 const SCEV *RHS) { 3467 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3468 return getUMaxExpr(Ops); 3469 } 3470 3471 const SCEV * 3472 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3473 assert(!Ops.empty() && "Cannot get empty umax!"); 3474 if (Ops.size() == 1) return Ops[0]; 3475 #ifndef NDEBUG 3476 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3477 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3478 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3479 "SCEVUMaxExpr operand types don't match!"); 3480 #endif 3481 3482 // Sort by complexity, this groups all similar expression types together. 3483 GroupByComplexity(Ops, &LI, DT); 3484 3485 // If there are any constants, fold them together. 3486 unsigned Idx = 0; 3487 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3488 ++Idx; 3489 assert(Idx < Ops.size()); 3490 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3491 // We found two constants, fold them together! 3492 ConstantInt *Fold = ConstantInt::get( 3493 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3494 Ops[0] = getConstant(Fold); 3495 Ops.erase(Ops.begin()+1); // Erase the folded element 3496 if (Ops.size() == 1) return Ops[0]; 3497 LHSC = cast<SCEVConstant>(Ops[0]); 3498 } 3499 3500 // If we are left with a constant minimum-int, strip it off. 3501 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3502 Ops.erase(Ops.begin()); 3503 --Idx; 3504 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3505 // If we have an umax with a constant maximum-int, it will always be 3506 // maximum-int. 3507 return Ops[0]; 3508 } 3509 3510 if (Ops.size() == 1) return Ops[0]; 3511 } 3512 3513 // Find the first UMax 3514 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3515 ++Idx; 3516 3517 // Check to see if one of the operands is a UMax. If so, expand its operands 3518 // onto our operand list, and recurse to simplify. 3519 if (Idx < Ops.size()) { 3520 bool DeletedUMax = false; 3521 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3522 Ops.erase(Ops.begin()+Idx); 3523 Ops.append(UMax->op_begin(), UMax->op_end()); 3524 DeletedUMax = true; 3525 } 3526 3527 if (DeletedUMax) 3528 return getUMaxExpr(Ops); 3529 } 3530 3531 // Okay, check to see if the same value occurs in the operand list twice. If 3532 // so, delete one. Since we sorted the list, these values are required to 3533 // be adjacent. 
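  // e.g. (illustrative): sorted operands {%a, %a, %b} lose the duplicate %a
  // below, reducing umax(%a, %a, %b) to umax(%a, %b).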
3534   for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
3535     // X umax Y umax Y  -->  X umax Y
3536     // X umax Y         -->  X, if X is known to be greater than or equal to Y
3537     if (Ops[i] == Ops[i+1] ||
3538         isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
3539       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
3540       --i; --e;
3541     } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
3542       Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
3543       --i; --e;
3544     }
3545
3546   if (Ops.size() == 1) return Ops[0];
3547
3548   assert(!Ops.empty() && "Reduced umax down to nothing!");
3549
3550   // Okay, it looks like we really DO need a umax expr. Check to see if we
3551   // already have one, otherwise create a new one.
3552   FoldingSetNodeID ID;
3553   ID.AddInteger(scUMaxExpr);
3554   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3555     ID.AddPointer(Ops[i]);
3556   void *IP = nullptr;
3557   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3558   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3559   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3560   SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
3561                                              O, Ops.size());
3562   UniqueSCEVs.InsertNode(S, IP);
3563   return S;
3564 }
3565
3566 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3567                                          const SCEV *RHS) {
3568   // ~smax(~x, ~y) == smin(x, y).
3569   return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3570 }
3571
3572 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3573                                          const SCEV *RHS) {
3574   // ~umax(~x, ~y) == umin(x, y).
3575   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
3576 }
3577
3578 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3579   // We can bypass creating a target-independent
3580   // constant expression and then folding it back into a ConstantInt.
3581   // This is just a compile-time optimization.
3582   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3583 }
3584
3585 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3586                                              StructType *STy,
3587                                              unsigned FieldNo) {
3588   // We can bypass creating a target-independent
3589   // constant expression and then folding it back into a ConstantInt.
3590   // This is just a compile-time optimization.
3591   return getConstant(
3592       IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3593 }
3594
3595 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3596   // Don't attempt to do anything other than create a SCEVUnknown object
3597   // here. createSCEV only calls getUnknown after checking for all other
3598   // interesting possibilities, and any other code that calls getUnknown
3599   // is doing so in order to hide a value from SCEV canonicalization.
3600
3601   FoldingSetNodeID ID;
3602   ID.AddInteger(scUnknown);
3603   ID.AddPointer(V);
3604   void *IP = nullptr;
3605   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3606     assert(cast<SCEVUnknown>(S)->getValue() == V &&
3607            "Stale SCEVUnknown in uniquing map!");
3608     return S;
3609   }
3610   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3611                                             FirstUnknown);
3612   FirstUnknown = cast<SCEVUnknown>(S);
3613   UniqueSCEVs.InsertNode(S, IP);
3614   return S;
3615 }
3616
3617 //===----------------------------------------------------------------------===//
3618 //            Basic SCEV Analysis and PHI Idiom Recognition Code
3619 //
3620
3621 /// Test if values of the given type are analyzable within the SCEV
3622 /// framework.
This primarily includes integer types, and it can optionally
3623 /// include pointer types if the ScalarEvolution class has access to
3624 /// target-specific information.
3625 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3626   // Integers and pointers are always SCEVable.
3627   return Ty->isIntegerTy() || Ty->isPointerTy();
3628 }
3629
3630 /// Return the size in bits of the specified type, for which isSCEVable must
3631 /// return true.
3632 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3633   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3634   return getDataLayout().getTypeSizeInBits(Ty);
3635 }
3636
3637 /// Return a type with the same bitwidth as the given type and which represents
3638 /// how SCEV will treat the given type, for which isSCEVable must return
3639 /// true. For pointer types, this is the pointer-sized integer type.
3640 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3641   assert(isSCEVable(Ty) && "Type is not SCEVable!");
3642
3643   if (Ty->isIntegerTy())
3644     return Ty;
3645
3646   // The only other supported type is a pointer.
3647   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3648   return getDataLayout().getIntPtrType(Ty);
3649 }
3650
3651 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3652   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3653 }
3654
3655 const SCEV *ScalarEvolution::getCouldNotCompute() {
3656   return CouldNotCompute.get();
3657 }
3658
3659 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3660   bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3661     auto *SU = dyn_cast<SCEVUnknown>(S);
3662     return SU && SU->getValue() == nullptr;
3663   });
3664
3665   return !ContainsNulls;
3666 }
3667
3668 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3669   HasRecMapType::iterator I = HasRecMap.find(S);
3670   if (I != HasRecMap.end())
3671     return I->second;
3672
3673   bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
3674   HasRecMap.insert({S, FoundAddRec});
3675   return FoundAddRec;
3676 }
3677
3678 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3679 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3680 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3681 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3682   const auto *Add = dyn_cast<SCEVAddExpr>(S);
3683   if (!Add)
3684     return {S, nullptr};
3685
3686   if (Add->getNumOperands() != 2)
3687     return {S, nullptr};
3688
3689   auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3690   if (!ConstOp)
3691     return {S, nullptr};
3692
3693   return {Add->getOperand(1), ConstOp->getValue()};
3694 }
3695
3696 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3697 /// by the value and offset from any ValueOffsetPair in the set.
3698 SetVector<ScalarEvolution::ValueOffsetPair> *
3699 ScalarEvolution::getSCEVValues(const SCEV *S) {
3700   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3701   if (SI == ExprValueMap.end())
3702     return nullptr;
3703 #ifndef NDEBUG
3704   if (VerifySCEVMap) {
3705     // Check there is no dangling Value in the set returned.
3706     for (const auto &VE : SI->second)
3707       assert(ValueExprMap.count(VE.first));
3708   }
3709 #endif
3710   return &SI->second;
3711 }
3712
3713 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
3714 /// cannot be used separately.
eraseValueFromMap should be used to remove
3715 /// V from ValueExprMap and ExprValueMap at the same time.
3716 void ScalarEvolution::eraseValueFromMap(Value *V) {
3717   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
3718   if (I != ValueExprMap.end()) {
3719     const SCEV *S = I->second;
3720     // Remove {V, 0} from the set of ExprValueMap[S].
3721     if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
3722       SV->remove({V, nullptr});
3723
3724     // Remove {V, Offset} from the set of ExprValueMap[Stripped].
3725     const SCEV *Stripped;
3726     ConstantInt *Offset;
3727     std::tie(Stripped, Offset) = splitAddExpr(S);
3728     if (Offset != nullptr) {
3729       if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
3730         SV->remove({V, Offset});
3731     }
3732     ValueExprMap.erase(V);
3733   }
3734 }
3735
3736 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3737 /// create a new one.
3738 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3739   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3740
3741   const SCEV *S = getExistingSCEV(V);
3742   if (S == nullptr) {
3743     S = createSCEV(V);
3744     // During PHI resolution, it is possible to create two SCEVs for the same
3745     // V, so we must double-check whether V->S was inserted into ValueExprMap
3746     // before inserting S->{V, 0} into ExprValueMap.
3747     std::pair<ValueExprMapType::iterator, bool> Pair =
3748         ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3749     if (Pair.second) {
3750       ExprValueMap[S].insert({V, nullptr});
3751
3752       // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3753       // ExprValueMap.
3754       const SCEV *Stripped = S;
3755       ConstantInt *Offset = nullptr;
3756       std::tie(Stripped, Offset) = splitAddExpr(S);
3757       // If Stripped is a SCEVUnknown, don't bother to save
3758       // Stripped -> {V, offset}: it doesn't simplify anything and sometimes
3759       // even increases the complexity of the expansion code.
3760       // If V is a GetElementPtrInst, don't save Stripped -> {V, offset} either,
3761       // because SCEV expansion may then generate add/sub instead of a GEP.
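      // e.g. (illustrative): if %v computes (7 + (%a * %b)), the entry
      // (%a * %b) -> {%v, 7} is recorded below, so the expander can later
      // materialize (%a * %b) as %v - 7 instead of emitting a fresh multiply.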
3762 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3763 !isa<GetElementPtrInst>(V)) 3764 ExprValueMap[Stripped].insert({V, Offset}); 3765 } 3766 } 3767 return S; 3768 } 3769 3770 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3771 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3772 3773 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3774 if (I != ValueExprMap.end()) { 3775 const SCEV *S = I->second; 3776 if (checkValidity(S)) 3777 return S; 3778 eraseValueFromMap(V); 3779 forgetMemoizedResults(S); 3780 } 3781 return nullptr; 3782 } 3783 3784 /// Return a SCEV corresponding to -V = -1*V 3785 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3786 SCEV::NoWrapFlags Flags) { 3787 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3788 return getConstant( 3789 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3790 3791 Type *Ty = V->getType(); 3792 Ty = getEffectiveSCEVType(Ty); 3793 return getMulExpr( 3794 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3795 } 3796 3797 /// Return a SCEV corresponding to ~V = -1-V 3798 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3799 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3800 return getConstant( 3801 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3802 3803 Type *Ty = V->getType(); 3804 Ty = getEffectiveSCEVType(Ty); 3805 const SCEV *AllOnes = 3806 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3807 return getMinusSCEV(AllOnes, V); 3808 } 3809 3810 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3811 SCEV::NoWrapFlags Flags, 3812 unsigned Depth) { 3813 // Fast path: X - X --> 0. 3814 if (LHS == RHS) 3815 return getZero(LHS->getType()); 3816 3817 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3818 // makes it so that we cannot make much use of NUW. 3819 auto AddFlags = SCEV::FlagAnyWrap; 3820 const bool RHSIsNotMinSigned = 3821 !getSignedRangeMin(RHS).isMinSignedValue(); 3822 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3823 // Let M be the minimum representable signed value. Then (-1)*RHS 3824 // signed-wraps if and only if RHS is M. That can happen even for 3825 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3826 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3827 // (-1)*RHS, we need to prove that RHS != M. 3828 // 3829 // If LHS is non-negative and we know that LHS - RHS does not 3830 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3831 // either by proving that RHS > M or that LHS >= 0. 3832 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3833 AddFlags = SCEV::FlagNSW; 3834 } 3835 } 3836 3837 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3838 // RHS is NSW and LHS >= 0. 3839 // 3840 // The difficulty here is that the NSW flag may have been proven 3841 // relative to a loop that is to be found in a recurrence in LHS and 3842 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3843 // larger scope than intended. 3844 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
3845
3846   return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
3847 }
3848
3849 const SCEV *
3850 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
3851   Type *SrcTy = V->getType();
3852   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3853          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3854          "Cannot truncate or zero extend with non-integer arguments!");
3855   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3856     return V;  // No conversion
3857   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3858     return getTruncateExpr(V, Ty);
3859   return getZeroExtendExpr(V, Ty);
3860 }
3861
3862 const SCEV *
3863 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
3864                                          Type *Ty) {
3865   Type *SrcTy = V->getType();
3866   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3867          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3868          "Cannot truncate or sign extend with non-integer arguments!");
3869   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3870     return V;  // No conversion
3871   if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
3872     return getTruncateExpr(V, Ty);
3873   return getSignExtendExpr(V, Ty);
3874 }
3875
3876 const SCEV *
3877 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
3878   Type *SrcTy = V->getType();
3879   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3880          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3881          "Cannot noop or zero extend with non-integer arguments!");
3882   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3883          "getNoopOrZeroExtend cannot truncate!");
3884   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3885     return V;  // No conversion
3886   return getZeroExtendExpr(V, Ty);
3887 }
3888
3889 const SCEV *
3890 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
3891   Type *SrcTy = V->getType();
3892   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3893          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3894          "Cannot noop or sign extend with non-integer arguments!");
3895   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3896          "getNoopOrSignExtend cannot truncate!");
3897   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3898     return V;  // No conversion
3899   return getSignExtendExpr(V, Ty);
3900 }
3901
3902 const SCEV *
3903 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
3904   Type *SrcTy = V->getType();
3905   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3906          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3907          "Cannot noop or any extend with non-integer arguments!");
3908   assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
3909          "getNoopOrAnyExtend cannot truncate!");
3910   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3911     return V;  // No conversion
3912   return getAnyExtendExpr(V, Ty);
3913 }
3914
3915 const SCEV *
3916 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
3917   Type *SrcTy = V->getType();
3918   assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
3919          (Ty->isIntegerTy() || Ty->isPointerTy()) &&
3920          "Cannot truncate or noop with non-integer arguments!");
3921   assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
3922          "getTruncateOrNoop cannot extend!");
3923   if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
3924     return V;  // No conversion
3925   return getTruncateExpr(V, Ty);
3926 }
3927
3928 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
3929                                                         const SCEV *RHS) {
3930   const SCEV *PromotedLHS = LHS;
3931   const SCEV
*PromotedRHS = RHS;
3932
3933   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3934     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3935   else
3936     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3937
3938   return getUMaxExpr(PromotedLHS, PromotedRHS);
3939 }
3940
3941 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
3942                                                         const SCEV *RHS) {
3943   const SCEV *PromotedLHS = LHS;
3944   const SCEV *PromotedRHS = RHS;
3945
3946   if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
3947     PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
3948   else
3949     PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
3950
3951   return getUMinExpr(PromotedLHS, PromotedRHS);
3952 }
3953
3954 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
3955   // A pointer operand may evaluate to a nonpointer expression, such as null.
3956   if (!V->getType()->isPointerTy())
3957     return V;
3958
3959   if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
3960     return getPointerBase(Cast->getOperand());
3961   } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
3962     const SCEV *PtrOp = nullptr;
3963     for (const SCEV *NAryOp : NAry->operands()) {
3964       if (NAryOp->getType()->isPointerTy()) {
3965         // Cannot find the base of an expression with multiple pointer operands.
3966         if (PtrOp)
3967           return V;
3968         PtrOp = NAryOp;
3969       }
3970     }
3971     if (!PtrOp)
3972       return V;
3973     return getPointerBase(PtrOp);
3974   }
3975   return V;
3976 }
3977
3978 /// Push users of the given Instruction onto the given Worklist.
3979 static void
3980 PushDefUseChildren(Instruction *I,
3981                    SmallVectorImpl<Instruction *> &Worklist) {
3982   // Push the def-use children onto the Worklist stack.
3983   for (User *U : I->users())
3984     Worklist.push_back(cast<Instruction>(U));
3985 }
3986
3987 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
3988   SmallVector<Instruction *, 16> Worklist;
3989   PushDefUseChildren(PN, Worklist);
3990
3991   SmallPtrSet<Instruction *, 8> Visited;
3992   Visited.insert(PN);
3993   while (!Worklist.empty()) {
3994     Instruction *I = Worklist.pop_back_val();
3995     if (!Visited.insert(I).second)
3996       continue;
3997
3998     auto It = ValueExprMap.find_as(static_cast<Value *>(I));
3999     if (It != ValueExprMap.end()) {
4000       const SCEV *Old = It->second;
4001
4002       // Short-circuit the def-use traversal if the symbolic name
4003       // ceases to appear in expressions.
4004       if (Old != SymName && !hasOperand(Old, SymName))
4005         continue;
4006
4007       // SCEVUnknown for a PHI either means that it has an unrecognized
4008       // structure, it's a PHI that's in the process of being computed
4009       // by createNodeForPHI, or it's a single-value PHI. In the first case,
4010       // additional loop trip count information isn't going to change anything.
4011       // In the second case, createNodeForPHI will perform the necessary
4012       // updates on its own when it gets to that point. In the third, we do
4013       // want to forget the SCEVUnknown.
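      // (Illustrative reading of the test below: any non-PHI user is
      // forgotten, as is a PHI whose cached SCEV is not a SCEVUnknown; a PHI
      // other than PN whose cached value is exactly SymName is the
      // single-value case and is also forgotten.)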
4014       if (!isa<PHINode>(I) ||
4015           !isa<SCEVUnknown>(Old) ||
4016           (I != PN && Old == SymName)) {
4017         eraseValueFromMap(It->first);
4018         forgetMemoizedResults(Old);
4019       }
4020     }
4021
4022     PushDefUseChildren(I, Worklist);
4023   }
4024 }
4025
4026 namespace {
4027
4028 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4029 public:
4030   SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4031       : SCEVRewriteVisitor(SE), L(L) {}
4032
4033   static const SCEV *rewrite(const SCEV *S, const Loop *L,
4034                              ScalarEvolution &SE) {
4035     SCEVInitRewriter Rewriter(L, SE);
4036     const SCEV *Result = Rewriter.visit(S);
4037     return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4038   }
4039
4040   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4041     if (!SE.isLoopInvariant(Expr, L))
4042       Valid = false;
4043     return Expr;
4044   }
4045
4046   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4047     // Only allow AddRecExprs for this loop.
4048     if (Expr->getLoop() == L)
4049       return Expr->getStart();
4050     Valid = false;
4051     return Expr;
4052   }
4053
4054   bool isValid() { return Valid; }
4055
4056 private:
4057   const Loop *L;
4058   bool Valid = true;
4059 };
4060
4061 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4062 public:
4063   SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4064       : SCEVRewriteVisitor(SE), L(L) {}
4065
4066   static const SCEV *rewrite(const SCEV *S, const Loop *L,
4067                              ScalarEvolution &SE) {
4068     SCEVShiftRewriter Rewriter(L, SE);
4069     const SCEV *Result = Rewriter.visit(S);
4070     return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4071   }
4072
4073   const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4074     // Only allow unknowns that are invariant in this loop.
4075     if (!SE.isLoopInvariant(Expr, L))
4076       Valid = false;
4077     return Expr;
4078   }
4079
4080   const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4081     if (Expr->getLoop() == L && Expr->isAffine())
4082       return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4083     Valid = false;
4084     return Expr;
4085   }
4086
4087   bool isValid() { return Valid; }
4088
4089 private:
4090   const Loop *L;
4091   bool Valid = true;
4092 };
4093
4094 } // end anonymous namespace
4095
4096 SCEV::NoWrapFlags
4097 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4098   if (!AR->isAffine())
4099     return SCEV::FlagAnyWrap;
4100
4101   using OBO = OverflowingBinaryOperator;
4102
4103   SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4104
4105   if (!AR->hasNoSignedWrap()) {
4106     ConstantRange AddRecRange = getSignedRange(AR);
4107     ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4108
4109     auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4110         Instruction::Add, IncRange, OBO::NoSignedWrap);
4111     if (NSWRegion.contains(AddRecRange))
4112       Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4113   }
4114
4115   if (!AR->hasNoUnsignedWrap()) {
4116     ConstantRange AddRecRange = getUnsignedRange(AR);
4117     ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4118
4119     auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4120         Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4121     if (NUWRegion.contains(AddRecRange))
4122       Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4123   }
4124
4125   return Result;
4126 }
4127
4128 namespace {
4129
4130 /// Represents an abstract binary operation. This may exist as a
4131 /// normal instruction or constant expression, or may have been
4132 /// derived from an expression tree.
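/// For example (illustrative): MatchBinaryOp below maps lshr %x, 2 to the
/// abstract BinaryOp(UDiv, %x, 4) even though no udiv instruction exists in
/// the IR.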
4133 struct BinaryOp {
4134   unsigned Opcode;
4135   Value *LHS;
4136   Value *RHS;
4137   bool IsNSW = false;
4138   bool IsNUW = false;
4139
4140   /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4141   /// constant expression.
4142   Operator *Op = nullptr;
4143
4144   explicit BinaryOp(Operator *Op)
4145       : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4146         Op(Op) {
4147     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4148       IsNSW = OBO->hasNoSignedWrap();
4149       IsNUW = OBO->hasNoUnsignedWrap();
4150     }
4151   }
4152
4153   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4154                     bool IsNUW = false)
4155       : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4156 };
4157
4158 } // end anonymous namespace
4159
4160 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4161 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4162   auto *Op = dyn_cast<Operator>(V);
4163   if (!Op)
4164     return None;
4165
4166   // Implementation detail: all the cleverness here should happen without
4167   // creating new SCEV expressions -- our caller knows tricks to avoid creating
4168   // SCEV expressions when possible, and we should not break that.
4169
4170   switch (Op->getOpcode()) {
4171   case Instruction::Add:
4172   case Instruction::Sub:
4173   case Instruction::Mul:
4174   case Instruction::UDiv:
4175   case Instruction::URem:
4176   case Instruction::And:
4177   case Instruction::Or:
4178   case Instruction::AShr:
4179   case Instruction::Shl:
4180     return BinaryOp(Op);
4181
4182   case Instruction::Xor:
4183     if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4184       // If the RHS of the xor is a signmask, then this is just an add.
4185       // Instcombine turns add of signmask into xor as a strength reduction step.
4186       if (RHSC->getValue().isSignMask())
4187         return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4188     return BinaryOp(Op);
4189
4190   case Instruction::LShr:
4191     // Turn logical shift right of a constant into an unsigned divide.
4192     if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4193       uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4194
4195       // If the shift count is not less than the bitwidth, the result of
4196       // the shift is undefined. Don't try to analyze it, because the
4197       // resolution chosen here may differ from the resolution chosen in
4198       // other parts of the compiler.
4199       if (SA->getValue().ult(BitWidth)) {
4200         Constant *X =
4201             ConstantInt::get(SA->getContext(),
4202                              APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4203         return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4204       }
4205     }
4206     return BinaryOp(Op);
4207
4208   case Instruction::ExtractValue: {
4209     auto *EVI = cast<ExtractValueInst>(Op);
4210     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4211       break;
4212
4213     auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
4214     if (!CI)
4215       break;
4216
4217     if (auto *F = CI->getCalledFunction())
4218       switch (F->getIntrinsicID()) {
4219       case Intrinsic::sadd_with_overflow:
4220       case Intrinsic::uadd_with_overflow:
4221         if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
4222           return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4223                           CI->getArgOperand(1));
4224
4225         // Now that we know that all uses of the arithmetic-result component of
4226         // CI are guarded by the overflow check, we can go ahead and pretend
4227         // that the arithmetic is non-overflowing.
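        // e.g. (illustrative): a fully guarded sadd.with.overflow(%a, %b) is
        // treated below as add nsw %a, %b, while the unsigned variant is
        // treated as nuw.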
4228         if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
4229           return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4230                           CI->getArgOperand(1), /* IsNSW = */ true,
4231                           /* IsNUW = */ false);
4232         else
4233           return BinaryOp(Instruction::Add, CI->getArgOperand(0),
4234                           CI->getArgOperand(1), /* IsNSW = */ false,
4235                           /* IsNUW = */ true);
4236       case Intrinsic::ssub_with_overflow:
4237       case Intrinsic::usub_with_overflow:
4238         if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
4239           return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4240                           CI->getArgOperand(1));
4241
4242         // The same reasoning as sadd/uadd above.
4243         if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
4244           return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4245                           CI->getArgOperand(1), /* IsNSW = */ true,
4246                           /* IsNUW = */ false);
4247         else
4248           return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
4249                           CI->getArgOperand(1), /* IsNSW = */ false,
4250                           /* IsNUW = */ true);
4251       case Intrinsic::smul_with_overflow:
4252       case Intrinsic::umul_with_overflow:
4253         return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
4254                         CI->getArgOperand(1));
4255       default:
4256         break;
4257       }
4258   }
4259
4260   default:
4261     break;
4262   }
4263
4264   return None;
4265 }
4266
4267 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
4268 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4269 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4270 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4271 /// follows one of the following patterns:
4272 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4273 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4274 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4275 /// we return the type of the truncation operation, and indicate whether the
4276 /// truncated type should be treated as signed/unsigned by setting
4277 /// \p Signed to true/false, respectively.
4278 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4279                                bool &Signed, ScalarEvolution &SE) {
4280   // The case where Op == SymbolicPHI (that is, with no type conversions on
4281   // the way) is handled by the regular add recurrence creating logic and
4282   // would have already been triggered in createAddRecForPHI. Reaching it here
4283   // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4284   // because one of the other operands of the SCEVAddExpr updating this PHI is
4285   // not invariant).
4286   //
4287   // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4288   // this case predicates that allow us to prove that Op == SymbolicPHI will
4289   // be added.
4290   if (Op == SymbolicPHI)
4291     return nullptr;
4292
4293   unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4294   unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4295   if (SourceBits != NewBits)
4296     return nullptr;
4297
4298   const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4299   const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4300   if (!SExt && !ZExt)
4301     return nullptr;
4302   const SCEVTruncateExpr *Trunc =
4303       SExt ?
dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4304            : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4305   if (!Trunc)
4306     return nullptr;
4307   const SCEV *X = Trunc->getOperand();
4308   if (X != SymbolicPHI)
4309     return nullptr;
4310   Signed = SExt != nullptr;
4311   return Trunc->getType();
4312 }
4313
4314 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4315   if (!PN->getType()->isIntegerTy())
4316     return nullptr;
4317   const Loop *L = LI.getLoopFor(PN->getParent());
4318   if (!L || L->getHeader() != PN->getParent())
4319     return nullptr;
4320   return L;
4321 }
4322
4323 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4324 // computation that updates the phi follows the following pattern:
4325 //   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4326 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4327 // If so, try to see if it can be rewritten as an AddRecExpr under some
4328 // Predicates. If successful, return them as a pair. Also cache the results
4329 // of the analysis.
4330 //
4331 // Example usage scenario:
4332 //    Say the Rewriter is called for the following SCEV:
4333 //         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4334 //    where:
4335 //         %X = phi i64 (%Start, %BEValue)
4336 //    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4337 //    and call this function with %SymbolicPHI = %X.
4338 //
4339 //    The analysis will find that the value coming around the backedge has
4340 //    the following SCEV:
4341 //         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4342 //    Upon concluding that this matches the desired pattern, the function
4343 //    will return the pair {NewAddRec, SmallPredsVec} where:
4344 //         NewAddRec = {%Start,+,%Step}
4345 //         SmallPredsVec = {P1, P2, P3} as follows:
4346 //           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4347 //           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4348 //           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4349 //    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4350 //    under the predicates {P1,P2,P3}.
4351 //    This predicated rewrite will be cached in PredicatedSCEVRewrites:
4352 //         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4353 //
4354 // TODO's:
4355 //
4356 // 1) Extend the Induction descriptor to also support inductions that involve
4357 //    casts: When needed (namely, when we are called in the context of the
4358 //    vectorizer induction analysis), a Set of cast instructions will be
4359 //    populated by this method, and provided back to isInductionPHI. This is
4360 //    needed to allow the vectorizer to properly record them to be ignored by
4361 //    the cost model and to avoid vectorizing them (otherwise these casts,
4362 //    which are redundant under the runtime overflow checks, will be
4363 //    vectorized, which can be costly).
4364 //
4365 // 2) Support additional induction/PHISCEV patterns: We also want to support
4366 //    inductions where the sext-trunc / zext-trunc operations (partly) occur
4367 //    after the induction update operation (the induction increment):
4368 //
4369 //      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4370 //    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4371 //
4372 //      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4373 //    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4374 // 4375 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4376 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4377 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4378 SmallVector<const SCEVPredicate *, 3> Predicates; 4379 4380 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4381 // return an AddRec expression under some predicate. 4382 4383 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4384 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4385 assert(L && "Expecting an integer loop header phi"); 4386 4387 // The loop may have multiple entrances or multiple exits; we can analyze 4388 // this phi as an addrec if it has a unique entry value and a unique 4389 // backedge value. 4390 Value *BEValueV = nullptr, *StartValueV = nullptr; 4391 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4392 Value *V = PN->getIncomingValue(i); 4393 if (L->contains(PN->getIncomingBlock(i))) { 4394 if (!BEValueV) { 4395 BEValueV = V; 4396 } else if (BEValueV != V) { 4397 BEValueV = nullptr; 4398 break; 4399 } 4400 } else if (!StartValueV) { 4401 StartValueV = V; 4402 } else if (StartValueV != V) { 4403 StartValueV = nullptr; 4404 break; 4405 } 4406 } 4407 if (!BEValueV || !StartValueV) 4408 return None; 4409 4410 const SCEV *BEValue = getSCEV(BEValueV); 4411 4412 // If the value coming around the backedge is an add with the symbolic 4413 // value we just inserted, possibly with casts that we can ignore under 4414 // an appropriate runtime guard, then we found a simple induction variable! 4415 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4416 if (!Add) 4417 return None; 4418 4419 // If there is a single occurrence of the symbolic value, possibly 4420 // casted, replace it with a recurrence. 4421 unsigned FoundIndex = Add->getNumOperands(); 4422 Type *TruncTy = nullptr; 4423 bool Signed; 4424 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4425 if ((TruncTy = 4426 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4427 if (FoundIndex == e) { 4428 FoundIndex = i; 4429 break; 4430 } 4431 4432 if (FoundIndex == Add->getNumOperands()) 4433 return None; 4434 4435 // Create an add with everything but the specified operand. 4436 SmallVector<const SCEV *, 8> Ops; 4437 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4438 if (i != FoundIndex) 4439 Ops.push_back(Add->getOperand(i)); 4440 const SCEV *Accum = getAddExpr(Ops); 4441 4442 // The runtime checks will not be valid if the step amount is 4443 // varying inside the loop. 4444 if (!isLoopInvariant(Accum, L)) 4445 return None; 4446 4447 // *** Part2: Create the predicates 4448 4449 // Analysis was successful: we have a phi-with-cast pattern for which we 4450 // can return an AddRec expression under the following predicates: 4451 // 4452 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4453 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4454 // P2: An Equal predicate that guarantees that 4455 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4456 // P3: An Equal predicate that guarantees that 4457 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4458 // 4459 // As we next prove, the above predicates guarantee that: 4460 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4461 // 4462 // 4463 // More formally, we want to prove that: 4464 // Expr(i+1) = Start + (i+1) * Accum 4465 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4466 // 4467 // Given that: 4468 // 1) Expr(0) = Start 4469 // 2) Expr(1) = Start + Accum 4470 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4471 // 3) Induction hypothesis (step i): 4472 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4473 // 4474 // Proof: 4475 // Expr(i+1) = 4476 // = Start + (i+1)*Accum 4477 // = (Start + i*Accum) + Accum 4478 // = Expr(i) + Accum 4479 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4480 // :: from step i 4481 // 4482 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4483 // 4484 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4485 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4486 // + Accum :: from P3 4487 // 4488 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4489 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4490 // 4491 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4492 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4493 // 4494 // By induction, the same applies to all iterations 1<=i<n: 4495 // 4496 4497 // Create a truncated addrec for which we will add a no overflow check (P1). 4498 const SCEV *StartVal = getSCEV(StartValueV); 4499 const SCEV *PHISCEV = 4500 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4501 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4502 4503 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4504 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4505 // will be constant. 4506 // 4507 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4508 // add P1. 4509 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4510 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4511 Signed ? SCEVWrapPredicate::IncrementNSSW 4512 : SCEVWrapPredicate::IncrementNUSW; 4513 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4514 Predicates.push_back(AddRecPred); 4515 } else 4516 assert(isa<SCEVConstant>(PHISCEV) && "Expected constant SCEV"); 4517 4518 // Create the Equal Predicates P2,P3: 4519 4520 // It is possible that the predicates P2 and/or P3 are computable at 4521 // compile time due to StartVal and/or Accum being constants. 4522 // If either one is, then we can check that now and escape if either P2 4523 // or P3 is false. 4524 4525 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4526 // for each of StartVal and Accum 4527 auto GetExtendedExpr = [&](const SCEV *Expr) -> const SCEV * { 4528 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4529 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4530 const SCEV *ExtendedExpr = 4531 Signed ? 
               getSignExtendExpr(TruncatedExpr, Expr->getType())
             : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = GetExtendedExpr(Expr)
  // determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = GetExtendedExpr(StartVal);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    DEBUG(dbgs() << "P2 is compile-time false\n");
    return None;
  }

  const SCEV *AccumExtended = GetExtendedExpr(Accum);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    DEBUG(dbgs() << "P3 is compile-time false\n");
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
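  //
  // Schematically, for a loop such as:
  //
  //   loop:
  //     %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ]
  //     %iv.next = add i64 %iv, %step
  //
  // BEValue is the SCEV (%iv + %step) with %iv still the symbolic name;
  // removing the symbolic operand leaves the step, and the PHI becomes the
  // recurrence {%start,+,%step}<%loop>.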
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(Add->getOperand(i));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //   i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are in the loop BB is in, or in some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
4884 return setUnavailable(); 4885 } 4886 llvm_unreachable("switch should be fully covered!"); 4887 } 4888 4889 bool isDone() { return TraversalDone; } 4890 }; 4891 4892 CheckAvailable CA(L, BB, DT); 4893 SCEVTraversal<CheckAvailable> ST(CA); 4894 4895 ST.visitAll(S); 4896 return CA.Available; 4897 } 4898 4899 // Try to match a control flow sequence that branches out at BI and merges back 4900 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 4901 // match. 4902 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 4903 Value *&C, Value *&LHS, Value *&RHS) { 4904 C = BI->getCondition(); 4905 4906 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 4907 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 4908 4909 if (!LeftEdge.isSingleEdge()) 4910 return false; 4911 4912 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 4913 4914 Use &LeftUse = Merge->getOperandUse(0); 4915 Use &RightUse = Merge->getOperandUse(1); 4916 4917 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 4918 LHS = LeftUse; 4919 RHS = RightUse; 4920 return true; 4921 } 4922 4923 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 4924 LHS = RightUse; 4925 RHS = LeftUse; 4926 return true; 4927 } 4928 4929 return false; 4930 } 4931 4932 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 4933 auto IsReachable = 4934 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 4935 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 4936 const Loop *L = LI.getLoopFor(PN->getParent()); 4937 4938 // We don't want to break LCSSA, even in a SCEV expression tree. 4939 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4940 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 4941 return nullptr; 4942 4943 // Try to match 4944 // 4945 // br %cond, label %left, label %right 4946 // left: 4947 // br label %merge 4948 // right: 4949 // br label %merge 4950 // merge: 4951 // V = phi [ %x, %left ], [ %y, %right ] 4952 // 4953 // as "select %cond, %x, %y" 4954 4955 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 4956 assert(IDom && "At least the entry block should dominate PN"); 4957 4958 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 4959 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 4960 4961 if (BI && BI->isConditional() && 4962 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 4963 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 4964 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 4965 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 4966 } 4967 4968 return nullptr; 4969 } 4970 4971 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 4972 if (const SCEV *S = createAddRecFromPHI(PN)) 4973 return S; 4974 4975 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 4976 return S; 4977 4978 // If the PHI has a single incoming value, follow that value, unless the 4979 // PHI's incoming blocks are in a different loop, in which case doing so 4980 // risks breaking LCSSA form. Instcombine would normally zap these, but 4981 // it doesn't have DominatorTree information, so it may miss cases. 4982 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 4983 if (LI.replacementPreservesLCSSAForm(PN, V)) 4984 return getSCEV(V); 4985 4986 // If it's not a loop phi, we can't handle it yet. 
4987 return getUnknown(PN); 4988 } 4989 4990 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 4991 Value *Cond, 4992 Value *TrueVal, 4993 Value *FalseVal) { 4994 // Handle "constant" branch or select. This can occur for instance when a 4995 // loop pass transforms an inner loop and moves on to process the outer loop. 4996 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 4997 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 4998 4999 // Try to match some simple smax or umax patterns. 5000 auto *ICI = dyn_cast<ICmpInst>(Cond); 5001 if (!ICI) 5002 return getUnknown(I); 5003 5004 Value *LHS = ICI->getOperand(0); 5005 Value *RHS = ICI->getOperand(1); 5006 5007 switch (ICI->getPredicate()) { 5008 case ICmpInst::ICMP_SLT: 5009 case ICmpInst::ICMP_SLE: 5010 std::swap(LHS, RHS); 5011 LLVM_FALLTHROUGH; 5012 case ICmpInst::ICMP_SGT: 5013 case ICmpInst::ICMP_SGE: 5014 // a >s b ? a+x : b+x -> smax(a, b)+x 5015 // a >s b ? b+x : a+x -> smin(a, b)+x 5016 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5017 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5018 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5019 const SCEV *LA = getSCEV(TrueVal); 5020 const SCEV *RA = getSCEV(FalseVal); 5021 const SCEV *LDiff = getMinusSCEV(LA, LS); 5022 const SCEV *RDiff = getMinusSCEV(RA, RS); 5023 if (LDiff == RDiff) 5024 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5025 LDiff = getMinusSCEV(LA, RS); 5026 RDiff = getMinusSCEV(RA, LS); 5027 if (LDiff == RDiff) 5028 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5029 } 5030 break; 5031 case ICmpInst::ICMP_ULT: 5032 case ICmpInst::ICMP_ULE: 5033 std::swap(LHS, RHS); 5034 LLVM_FALLTHROUGH; 5035 case ICmpInst::ICMP_UGT: 5036 case ICmpInst::ICMP_UGE: 5037 // a >u b ? a+x : b+x -> umax(a, b)+x 5038 // a >u b ? b+x : a+x -> umin(a, b)+x 5039 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5040 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5041 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5042 const SCEV *LA = getSCEV(TrueVal); 5043 const SCEV *RA = getSCEV(FalseVal); 5044 const SCEV *LDiff = getMinusSCEV(LA, LS); 5045 const SCEV *RDiff = getMinusSCEV(RA, RS); 5046 if (LDiff == RDiff) 5047 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5048 LDiff = getMinusSCEV(LA, RS); 5049 RDiff = getMinusSCEV(RA, LS); 5050 if (LDiff == RDiff) 5051 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5052 } 5053 break; 5054 case ICmpInst::ICMP_NE: 5055 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5056 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5057 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5058 const SCEV *One = getOne(I->getType()); 5059 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5060 const SCEV *LA = getSCEV(TrueVal); 5061 const SCEV *RA = getSCEV(FalseVal); 5062 const SCEV *LDiff = getMinusSCEV(LA, LS); 5063 const SCEV *RDiff = getMinusSCEV(RA, One); 5064 if (LDiff == RDiff) 5065 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5066 } 5067 break; 5068 case ICmpInst::ICMP_EQ: 5069 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 5070 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5071 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5072 const SCEV *One = getOne(I->getType()); 5073 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5074 const SCEV *LA = getSCEV(TrueVal); 5075 const SCEV *RA = getSCEV(FalseVal); 5076 const SCEV *LDiff = getMinusSCEV(LA, One); 5077 const SCEV *RDiff = getMinusSCEV(RA, LS); 5078 if (LDiff == RDiff) 5079 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5080 } 5081 break; 5082 default: 5083 break; 5084 } 5085 5086 return getUnknown(I); 5087 } 5088 5089 /// Expand GEP instructions into add and multiply operations. This allows them 5090 /// to be analyzed by regular SCEV code. 5091 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5092 // Don't attempt to analyze GEPs over unsized objects. 5093 if (!GEP->getSourceElementType()->isSized()) 5094 return getUnknown(GEP); 5095 5096 SmallVector<const SCEV *, 4> IndexExprs; 5097 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5098 IndexExprs.push_back(getSCEV(*Index)); 5099 return getGEPExpr(GEP, IndexExprs); 5100 } 5101 5102 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5103 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5104 return C->getAPInt().countTrailingZeros(); 5105 5106 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5107 return std::min(GetMinTrailingZeros(T->getOperand()), 5108 (uint32_t)getTypeSizeInBits(T->getType())); 5109 5110 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5111 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5112 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5113 ? getTypeSizeInBits(E->getType()) 5114 : OpRes; 5115 } 5116 5117 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5118 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5119 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5120 ? getTypeSizeInBits(E->getType()) 5121 : OpRes; 5122 } 5123 5124 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5125 // The result is the min of all operands results. 5126 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5127 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5128 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5129 return MinOpRes; 5130 } 5131 5132 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5133 // The result is the sum of all operands results. 5134 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5135 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5136 for (unsigned i = 1, e = M->getNumOperands(); 5137 SumOpRes != BitWidth && i != e; ++i) 5138 SumOpRes = 5139 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5140 return SumOpRes; 5141 } 5142 5143 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5144 // The result is the min of all operands results. 5145 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5146 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5147 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5148 return MinOpRes; 5149 } 5150 5151 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5152 // The result is the min of all operands results. 
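    // E.g. if one operand is known to be a multiple of 8 and the other only a
    // multiple of 2, the max of the two is only guaranteed to be a multiple
    // of 2, hence taking the min over the operands' results.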
5153 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5154 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5155 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5156 return MinOpRes; 5157 } 5158 5159 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5160 // The result is the min of all operands results. 5161 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5162 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5163 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5164 return MinOpRes; 5165 } 5166 5167 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5168 // For a SCEVUnknown, ask ValueTracking. 5169 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5170 return Known.countMinTrailingZeros(); 5171 } 5172 5173 // SCEVUDivExpr 5174 return 0; 5175 } 5176 5177 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5178 auto I = MinTrailingZerosCache.find(S); 5179 if (I != MinTrailingZerosCache.end()) 5180 return I->second; 5181 5182 uint32_t Result = GetMinTrailingZerosImpl(S); 5183 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5184 assert(InsertPair.second && "Should insert a new key"); 5185 return InsertPair.first->second; 5186 } 5187 5188 /// Helper method to assign a range to V from metadata present in the IR. 5189 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5190 if (Instruction *I = dyn_cast<Instruction>(V)) 5191 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5192 return getConstantRangeFromMetadata(*MD); 5193 5194 return None; 5195 } 5196 5197 /// Determine the range for a particular SCEV. If SignHint is 5198 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5199 /// with a "cleaner" unsigned (resp. signed) representation. 5200 const ConstantRange & 5201 ScalarEvolution::getRangeRef(const SCEV *S, 5202 ScalarEvolution::RangeSignHint SignHint) { 5203 DenseMap<const SCEV *, ConstantRange> &Cache = 5204 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5205 : SignedRanges; 5206 5207 // See if we've computed this range already. 5208 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5209 if (I != Cache.end()) 5210 return I->second; 5211 5212 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5213 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5214 5215 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5216 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5217 5218 // If the value has known zeros, the maximum value will have those known zeros 5219 // as well. 
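  // For instance, for an 8-bit S with two known trailing zeros, the unsigned
  // range below becomes [0, 0xFC + 1): the largest i8 value with the low two
  // bits clear is 0b11111100.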
5220 uint32_t TZ = GetMinTrailingZeros(S); 5221 if (TZ != 0) { 5222 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5223 ConservativeResult = 5224 ConstantRange(APInt::getMinValue(BitWidth), 5225 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5226 else 5227 ConservativeResult = ConstantRange( 5228 APInt::getSignedMinValue(BitWidth), 5229 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5230 } 5231 5232 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5233 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5234 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5235 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5236 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5237 } 5238 5239 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5240 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5241 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5242 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5243 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5244 } 5245 5246 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5247 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5248 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5249 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5250 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5251 } 5252 5253 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5254 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5255 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5256 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5257 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5258 } 5259 5260 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5261 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5262 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5263 return setRange(UDiv, SignHint, 5264 ConservativeResult.intersectWith(X.udiv(Y))); 5265 } 5266 5267 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5268 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5269 return setRange(ZExt, SignHint, 5270 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5271 } 5272 5273 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5274 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5275 return setRange(SExt, SignHint, 5276 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5277 } 5278 5279 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5280 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5281 return setRange(Trunc, SignHint, 5282 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5283 } 5284 5285 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5286 // If there's no unsigned wrap, the value will never be less than its 5287 // initial value. 5288 if (AddRec->hasNoUnsignedWrap()) 5289 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5290 if (!C->getValue()->isZero()) 5291 ConservativeResult = ConservativeResult.intersectWith( 5292 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5293 5294 // If there's no signed wrap, and all the operands have the same sign or 5295 // zero, the value won't ever change sign. 
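    // For example, {1,+,2}<nsw> starts non-negative and steps by a
    // non-negative amount without signed wrap, so it can never become
    // negative, and its signed range excludes all negative values.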
5296 if (AddRec->hasNoSignedWrap()) { 5297 bool AllNonNeg = true; 5298 bool AllNonPos = true; 5299 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5300 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5301 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5302 } 5303 if (AllNonNeg) 5304 ConservativeResult = ConservativeResult.intersectWith( 5305 ConstantRange(APInt(BitWidth, 0), 5306 APInt::getSignedMinValue(BitWidth))); 5307 else if (AllNonPos) 5308 ConservativeResult = ConservativeResult.intersectWith( 5309 ConstantRange(APInt::getSignedMinValue(BitWidth), 5310 APInt(BitWidth, 1))); 5311 } 5312 5313 // TODO: non-affine addrec 5314 if (AddRec->isAffine()) { 5315 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5316 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5317 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5318 auto RangeFromAffine = getRangeForAffineAR( 5319 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5320 BitWidth); 5321 if (!RangeFromAffine.isFullSet()) 5322 ConservativeResult = 5323 ConservativeResult.intersectWith(RangeFromAffine); 5324 5325 auto RangeFromFactoring = getRangeViaFactoring( 5326 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5327 BitWidth); 5328 if (!RangeFromFactoring.isFullSet()) 5329 ConservativeResult = 5330 ConservativeResult.intersectWith(RangeFromFactoring); 5331 } 5332 } 5333 5334 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5335 } 5336 5337 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5338 // Check if the IR explicitly contains !range metadata. 5339 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5340 if (MDRange.hasValue()) 5341 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5342 5343 // Split here to avoid paying the compile-time cost of calling both 5344 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5345 // if needed. 5346 const DataLayout &DL = getDataLayout(); 5347 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5348 // For a SCEVUnknown, ask ValueTracking. 5349 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5350 if (Known.One != ~Known.Zero + 1) 5351 ConservativeResult = 5352 ConservativeResult.intersectWith(ConstantRange(Known.One, 5353 ~Known.Zero + 1)); 5354 } else { 5355 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5356 "generalize as needed!"); 5357 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5358 if (NS > 1) 5359 ConservativeResult = ConservativeResult.intersectWith( 5360 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5361 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5362 } 5363 5364 return setRange(U, SignHint, std::move(ConservativeResult)); 5365 } 5366 5367 return setRange(S, SignHint, std::move(ConservativeResult)); 5368 } 5369 5370 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5371 // values that the expression can take. Initially, the expression has a value 5372 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5373 // argument defines if we treat Step as signed or unsigned. 
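// For example (i8, Signed = false): if StartRange = [10, 20), Step = 2 and
// MaxBECount = 3, the value can move up by at most 2 * 3 = 6, so a
// conservative result is [10, 26).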
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // This equation holds due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset is more than the full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
  if (NewLower == NewUpper)
    return ConstantRange(BitWidth, /* isFullSet = */ true);

  // No overflow detected: return the computed range [NewLower, NewUpper).
  return ConstantRange(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
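  // E.g. a step with signed range [-1, 2) may walk the value down by one or
  // up by one each iteration, so below we compute one range using the most
  // negative step and one using the most positive step, and take their union.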
5450 ConstantRange StartSRange = getSignedRange(Start); 5451 ConstantRange StepSRange = getSignedRange(Step); 5452 5453 // If Step can be both positive and negative, we need to find ranges for the 5454 // maximum absolute step values in both directions and union them. 5455 ConstantRange SR = 5456 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5457 MaxBECountValue, BitWidth, /* Signed = */ true); 5458 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5459 StartSRange, MaxBECountValue, 5460 BitWidth, /* Signed = */ true)); 5461 5462 // Next, consider step unsigned. 5463 ConstantRange UR = getRangeForAffineARHelper( 5464 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5465 MaxBECountValue, BitWidth, /* Signed = */ false); 5466 5467 // Finally, intersect signed and unsigned ranges. 5468 return SR.intersectWith(UR); 5469 } 5470 5471 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5472 const SCEV *Step, 5473 const SCEV *MaxBECount, 5474 unsigned BitWidth) { 5475 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5476 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5477 5478 struct SelectPattern { 5479 Value *Condition = nullptr; 5480 APInt TrueValue; 5481 APInt FalseValue; 5482 5483 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5484 const SCEV *S) { 5485 Optional<unsigned> CastOp; 5486 APInt Offset(BitWidth, 0); 5487 5488 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5489 "Should be!"); 5490 5491 // Peel off a constant offset: 5492 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5493 // In the future we could consider being smarter here and handle 5494 // {Start+Step,+,Step} too. 5495 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5496 return; 5497 5498 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5499 S = SA->getOperand(1); 5500 } 5501 5502 // Peel off a cast operation 5503 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5504 CastOp = SCast->getSCEVType(); 5505 S = SCast->getOperand(); 5506 } 5507 5508 using namespace llvm::PatternMatch; 5509 5510 auto *SU = dyn_cast<SCEVUnknown>(S); 5511 const APInt *TrueVal, *FalseVal; 5512 if (!SU || 5513 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5514 m_APInt(FalseVal)))) { 5515 Condition = nullptr; 5516 return; 5517 } 5518 5519 TrueValue = *TrueVal; 5520 FalseValue = *FalseVal; 5521 5522 // Re-apply the cast we peeled off earlier 5523 if (CastOp.hasValue()) 5524 switch (*CastOp) { 5525 default: 5526 llvm_unreachable("Unknown SCEV cast type!"); 5527 5528 case scTruncate: 5529 TrueValue = TrueValue.trunc(BitWidth); 5530 FalseValue = FalseValue.trunc(BitWidth); 5531 break; 5532 case scZeroExtend: 5533 TrueValue = TrueValue.zext(BitWidth); 5534 FalseValue = FalseValue.zext(BitWidth); 5535 break; 5536 case scSignExtend: 5537 TrueValue = TrueValue.sext(BitWidth); 5538 FalseValue = FalseValue.sext(BitWidth); 5539 break; 5540 } 5541 5542 // Re-apply the constant offset we peeled off earlier 5543 TrueValue += Offset; 5544 FalseValue += Offset; 5545 } 5546 5547 bool isRecognized() { return Condition != nullptr; } 5548 }; 5549 5550 SelectPattern StartPattern(*this, BitWidth, Start); 5551 if (!StartPattern.isRecognized()) 5552 return ConstantRange(BitWidth, /* isFullSet = */ true); 5553 5554 SelectPattern StepPattern(*this, BitWidth, Step); 5555 if (!StepPattern.isRecognized()) 5556 return ConstantRange(BitWidth, /* isFullSet = */ true); 5557 5558 if (StartPattern.Condition != StepPattern.Condition) { 5559 // 
We don't handle this case today; but we could, by considering four 5560 // possibilities below instead of two. I'm not sure if there are cases where 5561 // that will help over what getRange already does, though. 5562 return ConstantRange(BitWidth, /* isFullSet = */ true); 5563 } 5564 5565 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5566 // construct arbitrary general SCEV expressions here. This function is called 5567 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5568 // say) can end up caching a suboptimal value. 5569 5570 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5571 // C2352 and C2512 (otherwise it isn't needed). 5572 5573 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5574 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5575 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5576 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5577 5578 ConstantRange TrueRange = 5579 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5580 ConstantRange FalseRange = 5581 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5582 5583 return TrueRange.unionWith(FalseRange); 5584 } 5585 5586 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5587 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5588 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5589 5590 // Return early if there are no flags to propagate to the SCEV. 5591 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5592 if (BinOp->hasNoUnsignedWrap()) 5593 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5594 if (BinOp->hasNoSignedWrap()) 5595 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5596 if (Flags == SCEV::FlagAnyWrap) 5597 return SCEV::FlagAnyWrap; 5598 5599 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5600 } 5601 5602 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5603 // Here we check that I is in the header of the innermost loop containing I, 5604 // since we only deal with instructions in the loop header. The actual loop we 5605 // need to check later will come from an add recurrence, but getting that 5606 // requires computing the SCEV of the operands, which can be expensive. This 5607 // check we can do cheaply to rule out some cases early. 5608 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5609 if (InnermostContainingLoop == nullptr || 5610 InnermostContainingLoop->getHeader() != I->getParent()) 5611 return false; 5612 5613 // Only proceed if we can prove that I does not yield poison. 5614 if (!programUndefinedIfFullPoison(I)) 5615 return false; 5616 5617 // At this point we know that if I is executed, then it does not wrap 5618 // according to at least one of NSW or NUW. If I is not executed, then we do 5619 // not know if the calculation that I represents would wrap. Multiple 5620 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5621 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5622 // derived from other instructions that map to the same SCEV. We cannot make 5623 // that guarantee for cases where I is not executed. So we need to find the 5624 // loop that I is considered in relation to and prove that I is executed for 5625 // every iteration of that loop. 
  // That implies that the value that I calculates does not wrap anywhere in
  // the loop, so we can apply the flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (call the iteration in which \p I
  // first became poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
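  //
  // Schematically, for a latch like:
  //   %cmp = icmp ult i64 %iv.next, %n
  //   br i1 %cmp, label %loop, label %exit
  // if %iv.next is poison then %cmp is poison (icmp propagates poison), and
  // the conditional branch in the latch becomes control dependent on it.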
5696 Pushed.insert(I); 5697 PoisonStack.push_back(I); 5698 5699 bool LatchControlDependentOnPoison = false; 5700 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5701 const Instruction *Poison = PoisonStack.pop_back_val(); 5702 5703 for (auto *PoisonUser : Poison->users()) { 5704 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 5705 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5706 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5707 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5708 assert(BI->isConditional() && "Only possibility!"); 5709 if (BI->getParent() == LatchBB) { 5710 LatchControlDependentOnPoison = true; 5711 break; 5712 } 5713 } 5714 } 5715 } 5716 5717 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5718 } 5719 5720 ScalarEvolution::LoopProperties 5721 ScalarEvolution::getLoopProperties(const Loop *L) { 5722 using LoopProperties = ScalarEvolution::LoopProperties; 5723 5724 auto Itr = LoopPropertiesCache.find(L); 5725 if (Itr == LoopPropertiesCache.end()) { 5726 auto HasSideEffects = [](Instruction *I) { 5727 if (auto *SI = dyn_cast<StoreInst>(I)) 5728 return !SI->isSimple(); 5729 5730 return I->mayHaveSideEffects(); 5731 }; 5732 5733 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5734 /*HasNoSideEffects*/ true}; 5735 5736 for (auto *BB : L->getBlocks()) 5737 for (auto &I : *BB) { 5738 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5739 LP.HasNoAbnormalExits = false; 5740 if (HasSideEffects(&I)) 5741 LP.HasNoSideEffects = false; 5742 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5743 break; // We're already as pessimistic as we can get. 5744 } 5745 5746 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5747 assert(InsertPair.second && "We just checked!"); 5748 Itr = InsertPair.first; 5749 } 5750 5751 return Itr->second; 5752 } 5753 5754 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5755 if (!isSCEVable(V->getType())) 5756 return getUnknown(V); 5757 5758 if (Instruction *I = dyn_cast<Instruction>(V)) { 5759 // Don't attempt to analyze instructions in blocks that aren't 5760 // reachable. Such instructions don't matter, and they aren't required 5761 // to obey basic rules for definitions dominating uses which this 5762 // analysis depends on. 5763 if (!DT.isReachableFromEntry(I->getParent())) 5764 return getUnknown(V); 5765 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5766 return getConstant(CI); 5767 else if (isa<ConstantPointerNull>(V)) 5768 return getZero(V->getType()); 5769 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5770 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5771 else if (!isa<ConstantExpr>(V)) 5772 return getUnknown(V); 5773 5774 Operator *U = cast<Operator>(V); 5775 if (auto BO = MatchBinaryOp(U, DT)) { 5776 switch (BO->Opcode) { 5777 case Instruction::Add: { 5778 // The simple thing to do would be to just call getSCEV on both operands 5779 // and call getAddExpr with the result. However if we're looking at a 5780 // bunch of things all added together, this can be quite inefficient, 5781 // because it leads to N-1 getAddExpr calls for N ultimate operands. 5782 // Instead, gather up all the operands and make a single getAddExpr call. 5783 // LLVM IR canonical form means we need only traverse the left operands. 
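      // For example, a chain like (((a + b) + c) + d) is left-nested, so
      // following the LHS at each step collects {d, c, b, a}, and a single
      // getAddExpr call then builds (a + b + c + d).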
5784 SmallVector<const SCEV *, 4> AddOps; 5785 do { 5786 if (BO->Op) { 5787 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5788 AddOps.push_back(OpSCEV); 5789 break; 5790 } 5791 5792 // If a NUW or NSW flag can be applied to the SCEV for this 5793 // addition, then compute the SCEV for this addition by itself 5794 // with a separate call to getAddExpr. We need to do that 5795 // instead of pushing the operands of the addition onto AddOps, 5796 // since the flags are only known to apply to this particular 5797 // addition - they may not apply to other additions that can be 5798 // formed with operands from AddOps. 5799 const SCEV *RHS = getSCEV(BO->RHS); 5800 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5801 if (Flags != SCEV::FlagAnyWrap) { 5802 const SCEV *LHS = getSCEV(BO->LHS); 5803 if (BO->Opcode == Instruction::Sub) 5804 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 5805 else 5806 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 5807 break; 5808 } 5809 } 5810 5811 if (BO->Opcode == Instruction::Sub) 5812 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 5813 else 5814 AddOps.push_back(getSCEV(BO->RHS)); 5815 5816 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5817 if (!NewBO || (NewBO->Opcode != Instruction::Add && 5818 NewBO->Opcode != Instruction::Sub)) { 5819 AddOps.push_back(getSCEV(BO->LHS)); 5820 break; 5821 } 5822 BO = NewBO; 5823 } while (true); 5824 5825 return getAddExpr(AddOps); 5826 } 5827 5828 case Instruction::Mul: { 5829 SmallVector<const SCEV *, 4> MulOps; 5830 do { 5831 if (BO->Op) { 5832 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 5833 MulOps.push_back(OpSCEV); 5834 break; 5835 } 5836 5837 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 5838 if (Flags != SCEV::FlagAnyWrap) { 5839 MulOps.push_back( 5840 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 5841 break; 5842 } 5843 } 5844 5845 MulOps.push_back(getSCEV(BO->RHS)); 5846 auto NewBO = MatchBinaryOp(BO->LHS, DT); 5847 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 5848 MulOps.push_back(getSCEV(BO->LHS)); 5849 break; 5850 } 5851 BO = NewBO; 5852 } while (true); 5853 5854 return getMulExpr(MulOps); 5855 } 5856 case Instruction::UDiv: 5857 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5858 case Instruction::URem: 5859 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 5860 case Instruction::Sub: { 5861 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5862 if (BO->Op) 5863 Flags = getNoWrapFlagsFromUB(BO->Op); 5864 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 5865 } 5866 case Instruction::And: 5867 // For an expression like x&255 that merely masks off the high bits, 5868 // use zext(trunc(x)) as the SCEV expression. 5869 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5870 if (CI->isZero()) 5871 return getSCEV(BO->RHS); 5872 if (CI->isMinusOne()) 5873 return getSCEV(BO->LHS); 5874 const APInt &A = CI->getValue(); 5875 5876 // Instcombine's ShrinkDemandedConstant may strip bits out of 5877 // constants, obscuring what would otherwise be a low-bits mask. 5878 // Use computeKnownBits to compute what ShrinkDemandedConstant 5879 // knew about to reconstruct a low-bits mask value. 
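        // For example, on i16 instcombine may rewrite (x << 2) & 0xFF as
        // (x << 2) & 0xFC, since the two low bits are already zero; knowing
        // those zero bits lets us treat 0xFC as the low-bits mask it really
        // is and, roughly speaking, model the expression as
        // (zext i6 (trunc x) to i16) * 4.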
5880 unsigned LZ = A.countLeadingZeros(); 5881 unsigned TZ = A.countTrailingZeros(); 5882 unsigned BitWidth = A.getBitWidth(); 5883 KnownBits Known(BitWidth); 5884 computeKnownBits(BO->LHS, Known, getDataLayout(), 5885 0, &AC, nullptr, &DT); 5886 5887 APInt EffectiveMask = 5888 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 5889 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 5890 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 5891 const SCEV *LHS = getSCEV(BO->LHS); 5892 const SCEV *ShiftedLHS = nullptr; 5893 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 5894 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 5895 // For an expression like (x * 8) & 8, simplify the multiply. 5896 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 5897 unsigned GCD = std::min(MulZeros, TZ); 5898 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 5899 SmallVector<const SCEV*, 4> MulOps; 5900 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 5901 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 5902 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 5903 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 5904 } 5905 } 5906 if (!ShiftedLHS) 5907 ShiftedLHS = getUDivExpr(LHS, MulCount); 5908 return getMulExpr( 5909 getZeroExtendExpr( 5910 getTruncateExpr(ShiftedLHS, 5911 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 5912 BO->LHS->getType()), 5913 MulCount); 5914 } 5915 } 5916 break; 5917 5918 case Instruction::Or: 5919 // If the RHS of the Or is a constant, we may have something like: 5920 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 5921 // optimizations will transparently handle this case. 5922 // 5923 // In order for this transformation to be safe, the LHS must be of the 5924 // form X*(2^n) and the Or constant must be less than 2^n. 5925 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5926 const SCEV *LHS = getSCEV(BO->LHS); 5927 const APInt &CIVal = CI->getValue(); 5928 if (GetMinTrailingZeros(LHS) >= 5929 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 5930 // Build a plain add SCEV. 5931 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5932 // If the LHS of the add was an addrec and it has no-wrap flags, 5933 // transfer the no-wrap flags, since an or won't introduce a wrap. 5934 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5935 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5936 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5937 OldAR->getNoWrapFlags()); 5938 } 5939 return S; 5940 } 5941 } 5942 break; 5943 5944 case Instruction::Xor: 5945 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5946 // If the RHS of xor is -1, then this is a not operation. 5947 if (CI->isMinusOne()) 5948 return getNotSCEV(getSCEV(BO->LHS)); 5949 5950 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5951 // This is a variant of the check for xor with -1, and it handles 5952 // the case where instcombine has trimmed non-demanded bits out 5953 // of an xor with -1. 
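      // For example, with C = 7: and(x, 7) keeps the low three bits of x, and
      // xor-ing with 7 then flips exactly those bits, which is and(~x, 7),
      // i.e. (zext iN (not (trunc x to i3))).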
5954 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5955 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5956 if (LBO->getOpcode() == Instruction::And && 5957 LCI->getValue() == CI->getValue()) 5958 if (const SCEVZeroExtendExpr *Z = 5959 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5960 Type *UTy = BO->LHS->getType(); 5961 const SCEV *Z0 = Z->getOperand(); 5962 Type *Z0Ty = Z0->getType(); 5963 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5964 5965 // If C is a low-bits mask, the zero extend is serving to 5966 // mask off the high bits. Complement the operand and 5967 // re-apply the zext. 5968 if (CI->getValue().isMask(Z0TySize)) 5969 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5970 5971 // If C is a single bit, it may be in the sign-bit position 5972 // before the zero-extend. In this case, represent the xor 5973 // using an add, which is equivalent, and re-apply the zext. 5974 APInt Trunc = CI->getValue().trunc(Z0TySize); 5975 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5976 Trunc.isSignMask()) 5977 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5978 UTy); 5979 } 5980 } 5981 break; 5982 5983 case Instruction::Shl: 5984 // Turn shift left of a constant amount into a multiply. 5985 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5986 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5987 5988 // If the shift count is not less than the bitwidth, the result of 5989 // the shift is undefined. Don't try to analyze it, because the 5990 // resolution chosen here may differ from the resolution chosen in 5991 // other parts of the compiler. 5992 if (SA->getValue().uge(BitWidth)) 5993 break; 5994 5995 // It is currently not resolved how to interpret NSW for left 5996 // shift by BitWidth - 1, so we avoid applying flags in that 5997 // case. Remove this check (or this comment) once the situation 5998 // is resolved. See 5999 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 6000 // and http://reviews.llvm.org/D8890 . 6001 auto Flags = SCEV::FlagAnyWrap; 6002 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 6003 Flags = getNoWrapFlagsFromUB(BO->Op); 6004 6005 Constant *X = ConstantInt::get(getContext(), 6006 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6007 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6008 } 6009 break; 6010 6011 case Instruction::AShr: { 6012 // AShr X, C, where C is a constant. 6013 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6014 if (!CI) 6015 break; 6016 6017 Type *OuterTy = BO->LHS->getType(); 6018 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6019 // If the shift count is not less than the bitwidth, the result of 6020 // the shift is undefined. Don't try to analyze it, because the 6021 // resolution chosen here may differ from the resolution chosen in 6022 // other parts of the compiler. 6023 if (CI->getValue().uge(BitWidth)) 6024 break; 6025 6026 if (CI->isZero()) 6027 return getSCEV(BO->LHS); // shift by zero --> noop 6028 6029 uint64_t AShrAmt = CI->getZExtValue(); 6030 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6031 6032 Operator *L = dyn_cast<Operator>(BO->LHS); 6033 if (L && L->getOpcode() == Instruction::Shl) { 6034 // X = Shl A, n 6035 // Y = AShr X, m 6036 // Both n and m are constant. 6037 6038 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6039 if (L->getOperand(1) == BO->RHS) 6040 // For a two-shift sext-inreg, i.e. n = m, 6041 // use sext(trunc(x)) as the SCEV expression. 
6042 return getSignExtendExpr( 6043 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6044 6045 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6046 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6047 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6048 if (ShlAmt > AShrAmt) { 6049 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV 6050 // expression. We already checked that ShlAmt < BitWidth, so 6051 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6052 // ShlAmt - AShrAmt < BitWidth - AShrAmt. 6053 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6054 ShlAmt - AShrAmt); 6055 return getSignExtendExpr( 6056 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6057 getConstant(Mul)), OuterTy); 6058 } 6059 } 6060 } 6061 break; 6062 } 6063 } 6064 } 6065 6066 switch (U->getOpcode()) { 6067 case Instruction::Trunc: 6068 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6069 6070 case Instruction::ZExt: 6071 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6072 6073 case Instruction::SExt: 6074 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6075 // The NSW flag of a subtract does not always survive the conversion to 6076 // A + (-1)*B. By pushing sign extension onto its operands we are much 6077 // more likely to preserve NSW and allow later AddRec optimisations. 6078 // 6079 // NOTE: This is effectively duplicating this logic from getSignExtend: 6080 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6081 // but by that point the NSW information has potentially been lost. 6082 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6083 Type *Ty = U->getType(); 6084 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6085 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6086 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6087 } 6088 } 6089 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6090 6091 case Instruction::BitCast: 6092 // BitCasts are no-op casts so we just eliminate the cast. 6093 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6094 return getSCEV(U->getOperand(0)); 6095 break; 6096 6097 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 6098 // lead to pointer expressions which cannot safely be expanded to GEPs, 6099 // because ScalarEvolution doesn't respect the GEP aliasing rules when 6100 // simplifying integer expressions. 6101 6102 case Instruction::GetElementPtr: 6103 return createNodeForGEP(cast<GEPOperator>(U)); 6104 6105 case Instruction::PHI: 6106 return createNodeForPHI(cast<PHINode>(U)); 6107 6108 case Instruction::Select: 6109 // U can also be a select constant expr, which we let fall through. Since 6110 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 6111 // constant expressions cannot have instructions as operands, we'd have 6112 // returned getUnknown for a select constant expression anyway.
6113 if (isa<Instruction>(U)) 6114 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6115 U->getOperand(1), U->getOperand(2)); 6116 break; 6117 6118 case Instruction::Call: 6119 case Instruction::Invoke: 6120 if (Value *RV = CallSite(U).getReturnedArgOperand()) 6121 return getSCEV(RV); 6122 break; 6123 } 6124 6125 return getUnknown(V); 6126 } 6127 6128 //===----------------------------------------------------------------------===// 6129 // Iteration Count Computation Code 6130 // 6131 6132 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6133 if (!ExitCount) 6134 return 0; 6135 6136 ConstantInt *ExitConst = ExitCount->getValue(); 6137 6138 // Guard against huge trip counts. 6139 if (ExitConst->getValue().getActiveBits() > 32) 6140 return 0; 6141 6142 // In case of integer overflow, this returns 0, which is correct. 6143 return ((unsigned)ExitConst->getZExtValue()) + 1; 6144 } 6145 6146 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6147 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6148 return getSmallConstantTripCount(L, ExitingBB); 6149 6150 // No trip count information for multiple exits. 6151 return 0; 6152 } 6153 6154 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6155 BasicBlock *ExitingBlock) { 6156 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6157 assert(L->isLoopExiting(ExitingBlock) && 6158 "Exiting block must actually branch out of the loop!"); 6159 const SCEVConstant *ExitCount = 6160 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6161 return getConstantTripCount(ExitCount); 6162 } 6163 6164 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6165 const auto *MaxExitCount = 6166 dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L)); 6167 return getConstantTripCount(MaxExitCount); 6168 } 6169 6170 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6171 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6172 return getSmallConstantTripMultiple(L, ExitingBB); 6173 6174 // No trip multiple information for multiple exits. 6175 return 0; 6176 } 6177 6178 /// Returns the largest constant divisor of the trip count of this loop as a 6179 /// normal unsigned value, if possible. This means that the actual trip count is 6180 /// always a multiple of the returned value (don't forget the trip count could 6181 /// very well be zero as well!). 6182 /// 6183 /// Returns 1 if the trip count is unknown or not guaranteed to be the 6184 /// multiple of a constant (which is also the case if the trip count is simply 6185 /// constant; use getSmallConstantTripCount for that case). Will also return 1 6186 /// if the trip count is very large (>= 2^32). 6187 /// 6188 /// As explained in the comments for getSmallConstantTripCount, this assumes 6189 /// that control exits the loop via ExitingBlock. 6190 unsigned 6191 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6192 BasicBlock *ExitingBlock) { 6193 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6194 assert(L->isLoopExiting(ExitingBlock) && 6195 "Exiting block must actually branch out of the loop!"); 6196 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6197 if (ExitCount == getCouldNotCompute()) 6198 return 1; 6199 6200 // Get the trip count from the BE count by adding 1.
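// For example (illustrative), a backedge-taken count of (3 + (4 * %n)) gives // a trip count of (4 + (4 * %n)) = (4 * (1 + %n)), so the factoring below // would return 4 as the power-of-two trip multiple.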
6201 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6202 6203 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6204 if (!TC) 6205 // Attempt to factor more general cases. Returns the greatest power of 6206 // two divisor. If overflow happens, the trip count expression is still 6207 // divisible by the greatest power of 2 divisor returned. 6208 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6209 6210 ConstantInt *Result = TC->getValue(); 6211 6212 // Guard against huge trip counts (this requires checking 6213 // for zero to handle the case where the trip count == -1 and the 6214 // addition wraps). 6215 if (!Result || Result->getValue().getActiveBits() > 32 || 6216 Result->getValue().getActiveBits() == 0) 6217 return 1; 6218 6219 return (unsigned)Result->getZExtValue(); 6220 } 6221 6222 /// Get the expression for the number of loop iterations for which this loop is 6223 /// guaranteed not to exit via ExitingBlock. Otherwise return 6224 /// SCEVCouldNotCompute. 6225 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6226 BasicBlock *ExitingBlock) { 6227 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6228 } 6229 6230 const SCEV * 6231 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6232 SCEVUnionPredicate &Preds) { 6233 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds); 6234 } 6235 6236 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6237 return getBackedgeTakenInfo(L).getExact(this); 6238 } 6239 6240 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6241 /// known never to be less than the actual backedge taken count. 6242 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6243 return getBackedgeTakenInfo(L).getMax(this); 6244 } 6245 6246 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6247 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6248 } 6249 6250 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6251 static void 6252 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6253 BasicBlock *Header = L->getHeader(); 6254 6255 // Push all Loop-header PHIs onto the Worklist stack. 6256 for (BasicBlock::iterator I = Header->begin(); 6257 PHINode *PN = dyn_cast<PHINode>(I); ++I) 6258 Worklist.push_back(PN); 6259 } 6260 6261 const ScalarEvolution::BackedgeTakenInfo & 6262 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6263 auto &BTI = getBackedgeTakenInfo(L); 6264 if (BTI.hasFullInfo()) 6265 return BTI; 6266 6267 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6268 6269 if (!Pair.second) 6270 return Pair.first->second; 6271 6272 BackedgeTakenInfo Result = 6273 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6274 6275 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6276 } 6277 6278 const ScalarEvolution::BackedgeTakenInfo & 6279 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6280 // Initially insert an invalid entry for this loop. If the insertion 6281 // succeeds, proceed to actually compute a backedge-taken count and 6282 // update the value. The temporary CouldNotCompute value tells SCEV 6283 // code elsewhere that it shouldn't attempt to request a new 6284 // backedge-taken count, which could result in infinite recursion. 
6285 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6286 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6287 if (!Pair.second) 6288 return Pair.first->second; 6289 6290 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6291 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6292 // must be cleared in this scope. 6293 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6294 6295 if (Result.getExact(this) != getCouldNotCompute()) { 6296 assert(isLoopInvariant(Result.getExact(this), L) && 6297 isLoopInvariant(Result.getMax(this), L) && 6298 "Computed backedge-taken count isn't loop invariant for loop!"); 6299 ++NumTripCountsComputed; 6300 } 6301 else if (Result.getMax(this) == getCouldNotCompute() && 6302 isa<PHINode>(L->getHeader()->begin())) { 6303 // Only count loops that have phi nodes as not being computable. 6304 ++NumTripCountsNotComputed; 6305 } 6306 6307 // Now that we know more about the trip count for this loop, forget any 6308 // existing SCEV values for PHI nodes in this loop since they are only 6309 // conservative estimates made without the benefit of trip count 6310 // information. This is similar to the code in forgetLoop, except that 6311 // it handles SCEVUnknown PHI nodes specially. 6312 if (Result.hasAnyInfo()) { 6313 SmallVector<Instruction *, 16> Worklist; 6314 PushLoopPHIs(L, Worklist); 6315 6316 SmallPtrSet<Instruction *, 8> Visited; 6317 while (!Worklist.empty()) { 6318 Instruction *I = Worklist.pop_back_val(); 6319 if (!Visited.insert(I).second) 6320 continue; 6321 6322 ValueExprMapType::iterator It = 6323 ValueExprMap.find_as(static_cast<Value *>(I)); 6324 if (It != ValueExprMap.end()) { 6325 const SCEV *Old = It->second; 6326 6327 // SCEVUnknown for a PHI either means that it has an unrecognized 6328 // structure, or it's a PHI that's in the process of being computed 6329 // by createNodeForPHI. In the former case, additional loop trip 6330 // count information isn't going to change anything. In the latter 6331 // case, createNodeForPHI will perform the necessary updates on its 6332 // own when it gets to that point. 6333 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 6334 eraseValueFromMap(It->first); 6335 forgetMemoizedResults(Old, false); 6336 } 6337 if (PHINode *PN = dyn_cast<PHINode>(I)) 6338 ConstantEvolutionLoopExitValue.erase(PN); 6339 } 6340 6341 PushDefUseChildren(I, Worklist); 6342 } 6343 } 6344 6345 // Re-lookup the insert position, since the call to 6346 // computeBackedgeTakenCount above could result in a 6347 // recursive call to getBackedgeTakenInfo (on a different 6348 // loop), which would invalidate the iterator computed 6349 // earlier. 6350 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6351 } 6352 6353 void ScalarEvolution::forgetLoop(const Loop *L) { 6354 // Drop any stored trip count value. 6355 auto RemoveLoopFromBackedgeMap = 6356 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6357 auto BTCPos = Map.find(L); 6358 if (BTCPos != Map.end()) { 6359 BTCPos->second.clear(); 6360 Map.erase(BTCPos); 6361 } 6362 }; 6363 6364 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6365 SmallVector<Instruction *, 32> Worklist; 6366 SmallPtrSet<Instruction *, 16> Visited; 6367 6368 // Iterate over all the loops and sub-loops to drop SCEV information.
6369 while (!LoopWorklist.empty()) { 6370 auto *CurrL = LoopWorklist.pop_back_val(); 6371 6372 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6373 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6374 6375 // Drop information about predicated SCEV rewrites for this loop. 6376 for (auto I = PredicatedSCEVRewrites.begin(); 6377 I != PredicatedSCEVRewrites.end();) { 6378 std::pair<const SCEV *, const Loop *> Entry = I->first; 6379 if (Entry.second == CurrL) 6380 PredicatedSCEVRewrites.erase(I++); 6381 else 6382 ++I; 6383 } 6384 6385 // Drop information about expressions based on loop-header PHIs. 6386 PushLoopPHIs(CurrL, Worklist); 6387 6388 while (!Worklist.empty()) { 6389 Instruction *I = Worklist.pop_back_val(); 6390 if (!Visited.insert(I).second) 6391 continue; 6392 6393 ValueExprMapType::iterator It = 6394 ValueExprMap.find_as(static_cast<Value *>(I)); 6395 if (It != ValueExprMap.end()) { 6396 eraseValueFromMap(It->first); 6397 forgetMemoizedResults(It->second); 6398 if (PHINode *PN = dyn_cast<PHINode>(I)) 6399 ConstantEvolutionLoopExitValue.erase(PN); 6400 } 6401 6402 PushDefUseChildren(I, Worklist); 6403 } 6404 6405 // Advance past an entry before erasing it so the iterator stays valid. 6405 for (auto I = ExitLimits.begin(); I != ExitLimits.end();) { 6406 auto &Query = I->first; 6407 if (Query.L == CurrL) 6408 ExitLimits.erase(I++); 6409 else ++I; } 6410 6411 LoopPropertiesCache.erase(CurrL); 6412 // Forget all contained loops too, to avoid dangling entries in the 6413 // ValuesAtScopes map. 6414 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6415 } 6416 } 6417 6418 void ScalarEvolution::forgetValue(Value *V) { 6419 Instruction *I = dyn_cast<Instruction>(V); 6420 if (!I) return; 6421 6422 // Drop information about expressions based on loop-header PHIs. 6423 SmallVector<Instruction *, 16> Worklist; 6424 Worklist.push_back(I); 6425 6426 SmallPtrSet<Instruction *, 8> Visited; 6427 while (!Worklist.empty()) { 6428 I = Worklist.pop_back_val(); 6429 if (!Visited.insert(I).second) 6430 continue; 6431 6432 ValueExprMapType::iterator It = 6433 ValueExprMap.find_as(static_cast<Value *>(I)); 6434 if (It != ValueExprMap.end()) { 6435 eraseValueFromMap(It->first); 6436 forgetMemoizedResults(It->second); 6437 if (PHINode *PN = dyn_cast<PHINode>(I)) 6438 ConstantEvolutionLoopExitValue.erase(PN); 6439 } 6440 6441 PushDefUseChildren(I, Worklist); 6442 } 6443 } 6444 6445 /// Get the exact loop backedge taken count considering all loop exits. A 6446 /// computable result can only be returned for loops with a single exit. 6447 /// Returning the minimum taken count among all exits is incorrect because one 6448 /// of the loop's exit limits may have been skipped. howFarToZero assumes that 6449 /// the limit of each loop test is never skipped. This is a valid assumption as 6450 /// long as the loop exits via that test. For precise results, it is the 6451 /// caller's responsibility to specify the relevant loop exit using 6452 /// getExact(ExitingBlock, SE). 6453 const SCEV * 6454 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE, 6455 SCEVUnionPredicate *Preds) const { 6456 // If any exits were not computable, the loop is not computable.
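// E.g. (illustrative): if one exit's not-taken count is %n and another's is // %m, there is no single exact count to return; the loop below only succeeds // when every computable exit agrees on the same SCEV.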
6457 if (!isComplete() || ExitNotTaken.empty()) 6458 return SE->getCouldNotCompute(); 6459 6460 const SCEV *BECount = nullptr; 6461 for (auto &ENT : ExitNotTaken) { 6462 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV"); 6463 6464 if (!BECount) 6465 BECount = ENT.ExactNotTaken; 6466 else if (BECount != ENT.ExactNotTaken) 6467 return SE->getCouldNotCompute(); 6468 if (Preds && !ENT.hasAlwaysTruePredicate()) 6469 Preds->add(ENT.Predicate.get()); 6470 6471 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6472 "Predicate should be always true!"); 6473 } 6474 6475 assert(BECount && "Invalid not taken count for loop exit"); 6476 return BECount; 6477 } 6478 6479 /// Get the exact not taken count for this loop exit. 6480 const SCEV * 6481 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6482 ScalarEvolution *SE) const { 6483 for (auto &ENT : ExitNotTaken) 6484 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6485 return ENT.ExactNotTaken; 6486 6487 return SE->getCouldNotCompute(); 6488 } 6489 6490 /// getMax - Get the max backedge taken count for the loop. 6491 const SCEV * 6492 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6493 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6494 return !ENT.hasAlwaysTruePredicate(); 6495 }; 6496 6497 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6498 return SE->getCouldNotCompute(); 6499 6500 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6501 "No point in having a non-constant max backedge taken count!"); 6502 return getMax(); 6503 } 6504 6505 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6506 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6507 return !ENT.hasAlwaysTruePredicate(); 6508 }; 6509 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6510 } 6511 6512 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6513 ScalarEvolution *SE) const { 6514 if (getMax() && getMax() != SE->getCouldNotCompute() && 6515 SE->hasOperand(getMax(), S)) 6516 return true; 6517 6518 for (auto &ENT : ExitNotTaken) 6519 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6520 SE->hasOperand(ENT.ExactNotTaken, S)) 6521 return true; 6522 6523 return false; 6524 } 6525 6526 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6527 : ExactNotTaken(E), MaxNotTaken(E) { 6528 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6529 isa<SCEVConstant>(MaxNotTaken)) && 6530 "No point in having a non-constant max backedge taken count!"); 6531 } 6532 6533 ScalarEvolution::ExitLimit::ExitLimit( 6534 const SCEV *E, const SCEV *M, bool MaxOrZero, 6535 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6536 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6537 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6538 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6539 "Exact is not allowed to be less precise than Max"); 6540 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6541 isa<SCEVConstant>(MaxNotTaken)) && 6542 "No point in having a non-constant max backedge taken count!"); 6543 for (auto *PredSet : PredSetList) 6544 for (auto *P : *PredSet) 6545 addPredicate(P); 6546 } 6547 6548 ScalarEvolution::ExitLimit::ExitLimit( 6549 const SCEV *E, const SCEV *M, bool MaxOrZero, 6550 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6551 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6552 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6553 
isa<SCEVConstant>(MaxNotTaken)) && 6554 "No point in having a non-constant max backedge taken count!"); 6555 } 6556 6557 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6558 bool MaxOrZero) 6559 : ExitLimit(E, M, MaxOrZero, None) { 6560 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6561 isa<SCEVConstant>(MaxNotTaken)) && 6562 "No point in having a non-constant max backedge taken count!"); 6563 } 6564 6565 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6566 /// computable exit into a persistent ExitNotTakenInfo array. 6567 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6568 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6569 &&ExitCounts, 6570 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6571 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6572 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6573 6574 ExitNotTaken.reserve(ExitCounts.size()); 6575 std::transform( 6576 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6577 [&](const EdgeExitInfo &EEI) { 6578 BasicBlock *ExitBB = EEI.first; 6579 const ExitLimit &EL = EEI.second; 6580 if (EL.Predicates.empty()) 6581 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6582 6583 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6584 for (auto *Pred : EL.Predicates) 6585 Predicate->add(Pred); 6586 6587 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6588 }); 6589 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6590 "No point in having a non-constant max backedge taken count!"); 6591 } 6592 6593 /// Invalidate this result and free the ExitNotTakenInfo array. 6594 void ScalarEvolution::BackedgeTakenInfo::clear() { 6595 ExitNotTaken.clear(); 6596 } 6597 6598 /// Compute the number of times the backedge of the specified loop will execute. 6599 ScalarEvolution::BackedgeTakenInfo 6600 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6601 bool AllowPredicates) { 6602 SmallVector<BasicBlock *, 8> ExitingBlocks; 6603 L->getExitingBlocks(ExitingBlocks); 6604 6605 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6606 6607 SmallVector<EdgeExitInfo, 4> ExitCounts; 6608 bool CouldComputeBECount = true; 6609 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6610 const SCEV *MustExitMaxBECount = nullptr; 6611 const SCEV *MayExitMaxBECount = nullptr; 6612 bool MustExitMaxOrZero = false; 6613 6614 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6615 // and compute maxBECount. 6616 // Do a union of all the predicates here. 6617 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6618 BasicBlock *ExitBB = ExitingBlocks[i]; 6619 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6620 6621 assert((AllowPredicates || EL.Predicates.empty()) && 6622 "Predicated exit limit when predicates are not allowed!"); 6623 6624 // 1. For each exit that can be computed, add an entry to ExitCounts. 6625 // CouldComputeBECount is true only if all exits can be computed. 6626 if (EL.ExactNotTaken == getCouldNotCompute()) 6627 // We couldn't compute an exact value for this exit, so 6628 // we won't be able to compute an exact value for the loop. 6629 CouldComputeBECount = false; 6630 else 6631 ExitCounts.emplace_back(ExitBB, EL); 6632 6633 // 2. Derive the loop's MaxBECount from each exit's max number of 6634 // non-exiting iterations. 
Partition the loop exits into two kinds: 6635 // LoopMustExits and LoopMayExits. 6636 // 6637 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it 6638 // is a LoopMayExit. If any computable LoopMustExit is found, then 6639 // MaxBECount is the minimum EL.MaxNotTaken of computable 6640 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6641 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6642 // computable EL.MaxNotTaken. 6643 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6644 DT.dominates(ExitBB, Latch)) { 6645 if (!MustExitMaxBECount) { 6646 MustExitMaxBECount = EL.MaxNotTaken; 6647 MustExitMaxOrZero = EL.MaxOrZero; 6648 } else { 6649 MustExitMaxBECount = 6650 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6651 } 6652 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6653 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6654 MayExitMaxBECount = EL.MaxNotTaken; 6655 else { 6656 MayExitMaxBECount = 6657 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6658 } 6659 } 6660 } 6661 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6662 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6663 // The loop backedge will be taken the maximum or zero times if there's 6664 // a single exit that must be taken the maximum or zero times. 6665 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6666 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6667 MaxBECount, MaxOrZero); 6668 } 6669 6670 ScalarEvolution::ExitLimit 6671 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6672 bool AllowPredicates) { 6673 ExitLimitQuery Query(L, ExitingBlock, AllowPredicates); 6674 auto MaybeEL = ExitLimits.find(Query); 6675 if (MaybeEL != ExitLimits.end()) 6676 return MaybeEL->second; 6677 ExitLimit EL = computeExitLimitImpl(L, ExitingBlock, AllowPredicates); 6678 ExitLimits.insert({Query, EL}); 6679 return EL; 6680 } 6681 6682 ScalarEvolution::ExitLimit 6683 ScalarEvolution::computeExitLimitImpl(const Loop *L, BasicBlock *ExitingBlock, 6684 bool AllowPredicates) { 6685 // Okay, we've chosen an exiting block. See what condition causes us to exit 6686 // at this block and remember the exit block and whether all other targets 6687 // lead to the loop header. 6688 bool MustExecuteLoopHeader = true; 6689 BasicBlock *Exit = nullptr; 6690 for (auto *SBB : successors(ExitingBlock)) 6691 if (!L->contains(SBB)) { 6692 if (Exit) // Multiple exit successors. 6693 return getCouldNotCompute(); 6694 Exit = SBB; 6695 } else if (SBB != L->getHeader()) { 6696 MustExecuteLoopHeader = false; 6697 } 6698 6699 // At this point, we know we have a conditional branch that determines whether 6700 // the loop is exited. However, we don't know if the branch is executed each 6701 // time through the loop. If not, then the execution count of the branch will 6702 // not be equal to the trip count of the loop. 6703 // 6704 // Currently we check for this by checking to see if the Exit branch goes to 6705 // the loop header. If so, we know it will always execute the same number of 6706 // times as the loop. We also handle the case where the exit block *is* the 6707 // loop header. This is common for un-rotated loops. 6708 // 6709 // If both of those tests fail, walk up the unique predecessor chain to the 6710 // header, stopping if there is an edge that doesn't exit the loop.
If the 6711 // header is reached, the execution count of the branch will be equal to the 6712 // trip count of the loop. 6713 // 6714 // More extensive analysis could be done to handle more cases here. 6715 // 6716 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 6717 // The simple checks failed, try climbing the unique predecessor chain 6718 // up to the header. 6719 bool Ok = false; 6720 for (BasicBlock *BB = ExitingBlock; BB; ) { 6721 BasicBlock *Pred = BB->getUniquePredecessor(); 6722 if (!Pred) 6723 return getCouldNotCompute(); 6724 TerminatorInst *PredTerm = Pred->getTerminator(); 6725 for (const BasicBlock *PredSucc : PredTerm->successors()) { 6726 if (PredSucc == BB) 6727 continue; 6728 // If the predecessor has a successor that isn't BB and isn't 6729 // outside the loop, assume the worst. 6730 if (L->contains(PredSucc)) 6731 return getCouldNotCompute(); 6732 } 6733 if (Pred == L->getHeader()) { 6734 Ok = true; 6735 break; 6736 } 6737 BB = Pred; 6738 } 6739 if (!Ok) 6740 return getCouldNotCompute(); 6741 } 6742 6743 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 6744 TerminatorInst *Term = ExitingBlock->getTerminator(); 6745 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 6746 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 6747 // Proceed to the next level to examine the exit condition expression. 6748 return computeExitLimitFromCond( 6749 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 6750 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 6751 } 6752 6753 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 6754 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 6755 /*ControlsExit=*/IsOnlyExit); 6756 6757 return getCouldNotCompute(); 6758 } 6759 6760 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 6761 const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB, 6762 bool ControlsExit, bool AllowPredicates) { 6763 ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates); 6764 return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB, 6765 ControlsExit, AllowPredicates); 6766 } 6767 6768 Optional<ScalarEvolution::ExitLimit> 6769 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 6770 BasicBlock *TBB, BasicBlock *FBB, 6771 bool ControlsExit, bool AllowPredicates) { 6772 (void)this->L; 6773 (void)this->TBB; 6774 (void)this->FBB; 6775 (void)this->AllowPredicates; 6776 6777 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6778 this->AllowPredicates == AllowPredicates && 6779 "Variance in assumed invariant key components!"); 6780 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 6781 if (Itr == TripCountMap.end()) 6782 return None; 6783 return Itr->second; 6784 } 6785 6786 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 6787 BasicBlock *TBB, BasicBlock *FBB, 6788 bool ControlsExit, 6789 bool AllowPredicates, 6790 const ExitLimit &EL) { 6791 assert(this->L == L && this->TBB == TBB && this->FBB == FBB && 6792 this->AllowPredicates == AllowPredicates && 6793 "Variance in assumed invariant key components!"); 6794 6795 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 6796 assert(InsertResult.second && "Expected successful insertion!"); 6797 (void)InsertResult; 6798 } 6799 6800 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 6801 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6802 BasicBlock *FBB, bool ControlsExit, bool 
AllowPredicates) { 6803 6804 if (auto MaybeEL = 6805 Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates)) 6806 return *MaybeEL; 6807 6808 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB, 6809 ControlsExit, AllowPredicates); 6810 Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL); 6811 return EL; 6812 } 6813 6814 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 6815 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB, 6816 BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) { 6817 // Check if the controlling expression for this loop is an And or Or. 6818 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 6819 if (BO->getOpcode() == Instruction::And) { 6820 // Recurse on the operands of the and. 6821 bool EitherMayExit = L->contains(TBB); 6822 ExitLimit EL0 = computeExitLimitFromCondCached( 6823 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6824 AllowPredicates); 6825 ExitLimit EL1 = computeExitLimitFromCondCached( 6826 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6827 AllowPredicates); 6828 const SCEV *BECount = getCouldNotCompute(); 6829 const SCEV *MaxBECount = getCouldNotCompute(); 6830 if (EitherMayExit) { 6831 // Both conditions must be true for the loop to continue executing. 6832 // Choose the less conservative count. 6833 if (EL0.ExactNotTaken == getCouldNotCompute() || 6834 EL1.ExactNotTaken == getCouldNotCompute()) 6835 BECount = getCouldNotCompute(); 6836 else 6837 BECount = 6838 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6839 if (EL0.MaxNotTaken == getCouldNotCompute()) 6840 MaxBECount = EL1.MaxNotTaken; 6841 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6842 MaxBECount = EL0.MaxNotTaken; 6843 else 6844 MaxBECount = 6845 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6846 } else { 6847 // Both conditions must be true at the same time for the loop to exit. 6848 // For now, be conservative. 6849 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 6850 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6851 MaxBECount = EL0.MaxNotTaken; 6852 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6853 BECount = EL0.ExactNotTaken; 6854 } 6855 6856 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 6857 // to be more aggressive when computing BECount than when computing 6858 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 6859 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 6860 // to not. 6861 if (isa<SCEVCouldNotCompute>(MaxBECount) && 6862 !isa<SCEVCouldNotCompute>(BECount)) 6863 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 6864 6865 return ExitLimit(BECount, MaxBECount, false, 6866 {&EL0.Predicates, &EL1.Predicates}); 6867 } 6868 if (BO->getOpcode() == Instruction::Or) { 6869 // Recurse on the operands of the or. 6870 bool EitherMayExit = L->contains(FBB); 6871 ExitLimit EL0 = computeExitLimitFromCondCached( 6872 Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit, 6873 AllowPredicates); 6874 ExitLimit EL1 = computeExitLimitFromCondCached( 6875 Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit, 6876 AllowPredicates); 6877 const SCEV *BECount = getCouldNotCompute(); 6878 const SCEV *MaxBECount = getCouldNotCompute(); 6879 if (EitherMayExit) { 6880 // Both conditions must be false for the loop to continue executing. 6881 // Choose the less conservative count. 
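// E.g. (illustrative): for br (or %c0, %c1), if %c0 first becomes true after // 10 iterations and %c1 after 20, whichever operand becomes true first exits // the loop, so the combined count computed below is umin(10, 20) = 10.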
6882 if (EL0.ExactNotTaken == getCouldNotCompute() || 6883 EL1.ExactNotTaken == getCouldNotCompute()) 6884 BECount = getCouldNotCompute(); 6885 else 6886 BECount = 6887 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 6888 if (EL0.MaxNotTaken == getCouldNotCompute()) 6889 MaxBECount = EL1.MaxNotTaken; 6890 else if (EL1.MaxNotTaken == getCouldNotCompute()) 6891 MaxBECount = EL0.MaxNotTaken; 6892 else 6893 MaxBECount = 6894 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 6895 } else { 6896 // Both conditions must be false at the same time for the loop to exit. 6897 // For now, be conservative. 6898 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 6899 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 6900 MaxBECount = EL0.MaxNotTaken; 6901 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 6902 BECount = EL0.ExactNotTaken; 6903 } 6904 6905 return ExitLimit(BECount, MaxBECount, false, 6906 {&EL0.Predicates, &EL1.Predicates}); 6907 } 6908 } 6909 6910 // With an icmp, it may be feasible to compute an exact backedge-taken count. 6911 // Proceed to the next level to examine the icmp. 6912 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 6913 ExitLimit EL = 6914 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 6915 if (EL.hasFullInfo() || !AllowPredicates) 6916 return EL; 6917 6918 // Try again, but use SCEV predicates this time. 6919 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 6920 /*AllowPredicates=*/true); 6921 } 6922 6923 // Check for a constant condition. These are normally stripped out by 6924 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 6925 // preserve the CFG and is temporarily leaving constant conditions 6926 // in place. 6927 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 6928 if (L->contains(FBB) == !CI->getZExtValue()) 6929 // The backedge is always taken. 6930 return getCouldNotCompute(); 6931 else 6932 // The backedge is never taken. 6933 return getZero(CI->getType()); 6934 } 6935 6936 // If it's not an integer or pointer comparison then compute it the hard way. 6937 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 6938 } 6939 6940 ScalarEvolution::ExitLimit 6941 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 6942 ICmpInst *ExitCond, 6943 BasicBlock *TBB, 6944 BasicBlock *FBB, 6945 bool ControlsExit, 6946 bool AllowPredicates) { 6947 // If the condition was exit on true, convert the condition to exit on false 6948 ICmpInst::Predicate Cond; 6949 if (!L->contains(FBB)) 6950 Cond = ExitCond->getPredicate(); 6951 else 6952 Cond = ExitCond->getInversePredicate(); 6953 6954 // Handle common loops like: for (X = "string"; *X; ++X) 6955 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 6956 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 6957 ExitLimit ItCnt = 6958 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 6959 if (ItCnt.hasAnyInfo()) 6960 return ItCnt; 6961 } 6962 6963 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 6964 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 6965 6966 // Try to evaluate any dependencies out of the loop. 6967 LHS = getSCEVAtScope(LHS, L); 6968 RHS = getSCEVAtScope(RHS, L); 6969 6970 // At this point, we would like to compute how many iterations of the 6971 // loop the predicate will return true for these inputs. 6972 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 6973 // If there is a loop-invariant, force it into the RHS. 
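// E.g. (illustrative): (%n ugt {0,+,1}) is rewritten as ({0,+,1} ult %n) by // swapping both the operands and the predicate.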
6974 std::swap(LHS, RHS); 6975 Cond = ICmpInst::getSwappedPredicate(Cond); 6976 } 6977 6978 // Simplify the operands before analyzing them. 6979 (void)SimplifyICmpOperands(Cond, LHS, RHS); 6980 6981 // If we have a comparison of a chrec against a constant, try to use value 6982 // ranges to answer this query. 6983 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 6984 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 6985 if (AddRec->getLoop() == L) { 6986 // Form the constant range. 6987 ConstantRange CompRange = 6988 ConstantRange::makeExactICmpRegion(Cond, RHSC->getAPInt()); 6989 6990 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 6991 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 6992 } 6993 6994 switch (Cond) { 6995 case ICmpInst::ICMP_NE: { // while (X != Y) 6996 // Convert to: while (X-Y != 0) 6997 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 6998 AllowPredicates); 6999 if (EL.hasAnyInfo()) return EL; 7000 break; 7001 } 7002 case ICmpInst::ICMP_EQ: { // while (X == Y) 7003 // Convert to: while (X-Y == 0) 7004 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7005 if (EL.hasAnyInfo()) return EL; 7006 break; 7007 } 7008 case ICmpInst::ICMP_SLT: 7009 case ICmpInst::ICMP_ULT: { // while (X < Y) 7010 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 7011 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7012 AllowPredicates); 7013 if (EL.hasAnyInfo()) return EL; 7014 break; 7015 } 7016 case ICmpInst::ICMP_SGT: 7017 case ICmpInst::ICMP_UGT: { // while (X > Y) 7018 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 7019 ExitLimit EL = 7020 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7021 AllowPredicates); 7022 if (EL.hasAnyInfo()) return EL; 7023 break; 7024 } 7025 default: 7026 break; 7027 } 7028 7029 auto *ExhaustiveCount = 7030 computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 7031 7032 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7033 return ExhaustiveCount; 7034 7035 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7036 ExitCond->getOperand(1), L, Cond); 7037 } 7038 7039 ScalarEvolution::ExitLimit 7040 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7041 SwitchInst *Switch, 7042 BasicBlock *ExitingBlock, 7043 bool ControlsExit) { 7044 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7045 7046 // Give up if the exit is the default dest of a switch. 7047 if (Switch->getDefaultDest() == ExitingBlock) 7048 return getCouldNotCompute(); 7049 7050 assert(L->contains(Switch->getDefaultDest()) && 7051 "Default case must not exit the loop!"); 7052 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7053 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7054 7055 // while (X != Y) --> while (X-Y != 0) 7056 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7057 if (EL.hasAnyInfo()) 7058 return EL; 7059 7060 return getCouldNotCompute(); 7061 } 7062 7063 static ConstantInt * 7064 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7065 ScalarEvolution &SE) { 7066 const SCEV *InVal = SE.getConstant(C); 7067 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7068 assert(isa<SCEVConstant>(Val) && 7069 "Evaluation of SCEV at constant didn't fold correctly?"); 7070 return cast<SCEVConstant>(Val)->getValue(); 7071 } 7072 7073 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7074 /// compute the backedge execution count. 
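/// A typical (illustrative) source pattern: /// static const int A[] = {5, 3, 2, 0}; /// for (i = 0; A[i] != 0; ++i) {} /// where each symbolically evaluated iteration folds the load to a constant.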
7075 ScalarEvolution::ExitLimit 7076 ScalarEvolution::computeLoadConstantCompareExitLimit( 7077 LoadInst *LI, 7078 Constant *RHS, 7079 const Loop *L, 7080 ICmpInst::Predicate predicate) { 7081 if (LI->isVolatile()) return getCouldNotCompute(); 7082 7083 // Check to see if the loaded pointer is a getelementptr of a global. 7084 // TODO: Use SCEV instead of manually grubbing with GEPs. 7085 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 7086 if (!GEP) return getCouldNotCompute(); 7087 7088 // Make sure that it is really a constant global we are gepping, with an 7089 // initializer, and make sure the first IDX is really 0. 7090 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 7091 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 7092 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 7093 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 7094 return getCouldNotCompute(); 7095 7096 // Okay, we allow one non-constant index into the GEP instruction. 7097 Value *VarIdx = nullptr; 7098 std::vector<Constant*> Indexes; 7099 unsigned VarIdxNum = 0; 7100 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 7101 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 7102 Indexes.push_back(CI); 7103 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 7104 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 7105 VarIdx = GEP->getOperand(i); 7106 VarIdxNum = i-2; 7107 Indexes.push_back(nullptr); 7108 } 7109 7110 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 7111 if (!VarIdx) 7112 return getCouldNotCompute(); 7113 7114 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 7115 // Check to see if X is a loop variant variable value now. 7116 const SCEV *Idx = getSCEV(VarIdx); 7117 Idx = getSCEVAtScope(Idx, L); 7118 7119 // We can only recognize very limited forms of loop index expressions, in 7120 // particular, only affine AddRec's like {C1,+,C2}. 7121 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 7122 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 7123 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 7124 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 7125 return getCouldNotCompute(); 7126 7127 unsigned MaxSteps = MaxBruteForceIterations; 7128 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 7129 ConstantInt *ItCst = ConstantInt::get( 7130 cast<IntegerType>(IdxExpr->getType()), IterationNum); 7131 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 7132 7133 // Form the GEP offset. 7134 Indexes[VarIdxNum] = Val; 7135 7136 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 7137 Indexes); 7138 if (!Result) break; // Cannot compute! 7139 7140 // Evaluate the condition for this iteration. 7141 Result = ConstantExpr::getICmp(predicate, Result, RHS); 7142 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 7143 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 7144 ++NumArrayLenItCounts; 7145 return getConstant(ItCst); // Found terminating iteration! 
7146 } 7147 } 7148 return getCouldNotCompute(); 7149 } 7150 7151 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 7152 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 7153 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 7154 if (!RHS) 7155 return getCouldNotCompute(); 7156 7157 const BasicBlock *Latch = L->getLoopLatch(); 7158 if (!Latch) 7159 return getCouldNotCompute(); 7160 7161 const BasicBlock *Predecessor = L->getLoopPredecessor(); 7162 if (!Predecessor) 7163 return getCouldNotCompute(); 7164 7165 // Return true if V is of the form "LHS `shift_op` <positive constant>". 7166 // Return LHS in OutLHS and shift_op in OutOpCode. 7167 auto MatchPositiveShift = 7168 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 7169 7170 using namespace PatternMatch; 7171 7172 ConstantInt *ShiftAmt; 7173 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7174 OutOpCode = Instruction::LShr; 7175 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7176 OutOpCode = Instruction::AShr; 7177 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7178 OutOpCode = Instruction::Shl; 7179 else 7180 return false; 7181 7182 return ShiftAmt->getValue().isStrictlyPositive(); 7183 }; 7184 7185 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 7186 // 7187 // loop: 7188 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 7189 // %iv.shifted = lshr i32 %iv, <positive constant> 7190 // 7191 // Return true on a successful match. Return the corresponding PHI node (%iv 7192 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 7193 auto MatchShiftRecurrence = 7194 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 7195 Optional<Instruction::BinaryOps> PostShiftOpCode; 7196 7197 { 7198 Instruction::BinaryOps OpC; 7199 Value *V; 7200 7201 // If we encounter a shift instruction, "peel off" the shift operation, 7202 // and remember that we did so. Later when we inspect %iv's backedge 7203 // value, we will make sure that the backedge value uses the same 7204 // operation. 7205 // 7206 // Note: the peeled shift operation does not have to be the same 7207 // instruction as the one feeding into the PHI's backedge value. We only 7208 // really care about it being the same *kind* of shift instruction -- 7209 // that's all that is required for our later inferences to hold. 7210 if (MatchPositiveShift(LHS, V, OpC)) { 7211 PostShiftOpCode = OpC; 7212 LHS = V; 7213 } 7214 } 7215 7216 PNOut = dyn_cast<PHINode>(LHS); 7217 if (!PNOut || PNOut->getParent() != L->getHeader()) 7218 return false; 7219 7220 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 7221 Value *OpLHS; 7222 7223 return 7224 // The backedge value for the PHI node must be a shift by a positive 7225 // amount 7226 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 7227 7228 // of the PHI node itself 7229 OpLHS == PNOut && 7230 7231 // and the kind of shift should match the kind of shift we peeled 7232 // off, if any. 7233 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 7234 }; 7235 7236 PHINode *PN; 7237 Instruction::BinaryOps OpCode; 7238 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 7239 return getCouldNotCompute(); 7240 7241 const DataLayout &DL = getDataLayout(); 7242 7243 // The key rationale for this optimization is that for some kinds of shift 7244 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 7245 // within a finite number of iterations.
If the condition guarding the 7246 // backedge (in the sense that the backedge is taken if the condition is true) 7247 // is false for the value the shift recurrence stabilizes to, then we know 7248 // that the backedge is taken only a finite number of times. 7249 7250 ConstantInt *StableValue = nullptr; 7251 switch (OpCode) { 7252 default: 7253 llvm_unreachable("Impossible case!"); 7254 7255 case Instruction::AShr: { 7256 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most 7257 // bitwidth(K) iterations. 7258 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7259 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7260 Predecessor->getTerminator(), &DT); 7261 auto *Ty = cast<IntegerType>(RHS->getType()); 7262 if (Known.isNonNegative()) 7263 StableValue = ConstantInt::get(Ty, 0); 7264 else if (Known.isNegative()) 7265 StableValue = ConstantInt::get(Ty, -1, true); 7266 else 7267 return getCouldNotCompute(); 7268 7269 break; 7270 } 7271 case Instruction::LShr: 7272 case Instruction::Shl: 7273 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7274 // stabilize to 0 in at most bitwidth(K) iterations. 7275 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7276 break; 7277 } 7278 7279 auto *Result = 7280 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7281 assert(Result->getType()->isIntegerTy(1) && 7282 "Otherwise cannot be an operand to a branch instruction"); 7283 7284 if (Result->isZeroValue()) { 7285 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7286 const SCEV *UpperBound = 7287 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7288 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7289 } 7290 7291 return getCouldNotCompute(); 7292 } 7293 7294 /// Return true if we can constant fold an instruction of the specified type, 7295 /// assuming that all operands were constants. 7296 static bool CanConstantFold(const Instruction *I) { 7297 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7298 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7299 isa<LoadInst>(I)) 7300 return true; 7301 7302 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7303 if (const Function *F = CI->getCalledFunction()) 7304 return canConstantFoldCallTo(CI, F); 7305 return false; 7306 } 7307 7308 /// Determine whether this instruction can constant evolve within this loop 7309 /// assuming its operands can all constant evolve. 7310 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7311 // An instruction outside of the loop can't be derived from a loop PHI. 7312 if (!L->contains(I)) return false; 7313 7314 if (isa<PHINode>(I)) { 7315 // We don't currently keep track of the control flow needed to evaluate 7316 // PHIs, so we cannot handle PHIs inside of loops. 7317 return L->getHeader() == I->getParent(); 7318 } 7319 7320 // If we won't be able to constant fold this expression even if the operands 7321 // are constants, bail early. 7322 return CanConstantFold(I); 7323 } 7324 7325 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7326 /// recursing through each instruction operand until reaching a loop header phi. 
7327 static PHINode * 7328 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7329 DenseMap<Instruction *, PHINode *> &PHIMap, 7330 unsigned Depth) { 7331 if (Depth > MaxConstantEvolvingDepth) 7332 return nullptr; 7333 7334 // Otherwise, we can evaluate this instruction if all of its operands are 7335 // constant or derived from a PHI node themselves. 7336 PHINode *PHI = nullptr; 7337 for (Value *Op : UseInst->operands()) { 7338 if (isa<Constant>(Op)) continue; 7339 7340 Instruction *OpInst = dyn_cast<Instruction>(Op); 7341 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7342 7343 PHINode *P = dyn_cast<PHINode>(OpInst); 7344 if (!P) 7345 // If this operand is already visited, reuse the prior result. 7346 // We may have P != PHI if this is the deepest point at which the 7347 // inconsistent paths meet. 7348 P = PHIMap.lookup(OpInst); 7349 if (!P) { 7350 // Recurse and memoize the results, whether a phi is found or not. 7351 // This recursive call invalidates pointers into PHIMap. 7352 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 7353 PHIMap[OpInst] = P; 7354 } 7355 if (!P) 7356 return nullptr; // Not evolving from PHI 7357 if (PHI && PHI != P) 7358 return nullptr; // Evolving from multiple different PHIs. 7359 PHI = P; 7360 } 7361 // This is an expression evolving from a constant PHI! 7362 return PHI; 7363 } 7364 7365 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 7366 /// in the loop that V is derived from. We allow arbitrary operations along the 7367 /// way, but the operands of an operation must either be constants or values 7368 /// derived from a constant PHI. If this expression does not fit with these 7369 /// constraints, return null. 7370 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 7371 Instruction *I = dyn_cast<Instruction>(V); 7372 if (!I || !canConstantEvolve(I, L)) return nullptr; 7373 7374 if (PHINode *PN = dyn_cast<PHINode>(I)) 7375 return PN; 7376 7377 // Record non-constant instructions contained by the loop. 7378 DenseMap<Instruction *, PHINode *> PHIMap; 7379 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 7380 } 7381 7382 /// EvaluateExpression - Given an expression that passes the 7383 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 7384 /// in the loop has the value PHIVal. If we can't fold this expression for some 7385 /// reason, return null. 7386 static Constant *EvaluateExpression(Value *V, const Loop *L, 7387 DenseMap<Instruction *, Constant *> &Vals, 7388 const DataLayout &DL, 7389 const TargetLibraryInfo *TLI) { 7390 // Convenient constant check, but redundant for recursive calls. 7391 if (Constant *C = dyn_cast<Constant>(V)) return C; 7392 Instruction *I = dyn_cast<Instruction>(V); 7393 if (!I) return nullptr; 7394 7395 if (Constant *C = Vals.lookup(I)) return C; 7396 7397 // An instruction inside the loop depends on a value outside the loop that we 7398 // weren't given a mapping for, or a value such as a call inside the loop. 7399 if (!canConstantEvolve(I, L)) return nullptr; 7400 7401 // An unmapped PHI can be due to a branch or another loop inside this loop, 7402 // or due to this not being the initial iteration through a loop where we 7403 // couldn't compute the evolution of this particular PHI last time.
7404 if (isa<PHINode>(I)) return nullptr; 7405 7406 std::vector<Constant*> Operands(I->getNumOperands()); 7407 7408 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7409 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7410 if (!Operand) { 7411 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7412 if (!Operands[i]) return nullptr; 7413 continue; 7414 } 7415 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7416 Vals[Operand] = C; 7417 if (!C) return nullptr; 7418 Operands[i] = C; 7419 } 7420 7421 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7422 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7423 Operands[1], DL, TLI); 7424 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7425 if (!LI->isVolatile()) 7426 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7427 } 7428 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7429 } 7430 7431 7432 // If every incoming value to PN except the one for BB is a specific Constant, 7433 // return that, else return nullptr. 7434 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7435 Constant *IncomingVal = nullptr; 7436 7437 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7438 if (PN->getIncomingBlock(i) == BB) 7439 continue; 7440 7441 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7442 if (!CurrentVal) 7443 return nullptr; 7444 7445 if (IncomingVal != CurrentVal) { 7446 if (IncomingVal) 7447 return nullptr; 7448 IncomingVal = CurrentVal; 7449 } 7450 } 7451 7452 return IncomingVal; 7453 } 7454 7455 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7456 /// in the header of its containing loop, we know the loop executes a 7457 /// constant number of times, and the PHI node is just a recurrence 7458 /// involving constants, fold it. 7459 Constant * 7460 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7461 const APInt &BEs, 7462 const Loop *L) { 7463 auto I = ConstantEvolutionLoopExitValue.find(PN); 7464 if (I != ConstantEvolutionLoopExitValue.end()) 7465 return I->second; 7466 7467 if (BEs.ugt(MaxBruteForceIterations)) 7468 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7469 7470 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7471 7472 DenseMap<Instruction *, Constant *> CurrentIterVals; 7473 BasicBlock *Header = L->getHeader(); 7474 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7475 7476 BasicBlock *Latch = L->getLoopLatch(); 7477 if (!Latch) 7478 return nullptr; 7479 7480 for (auto &I : *Header) { 7481 PHINode *PHI = dyn_cast<PHINode>(&I); 7482 if (!PHI) break; 7483 auto *StartCST = getOtherIncomingValue(PHI, Latch); 7484 if (!StartCST) continue; 7485 CurrentIterVals[PHI] = StartCST; 7486 } 7487 if (!CurrentIterVals.count(PN)) 7488 return RetVal = nullptr; 7489 7490 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7491 7492 // Execute the loop symbolically to determine the exit value. 7493 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7494 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7495 7496 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7497 unsigned IterationNum = 0; 7498 const DataLayout &DL = getDataLayout(); 7499 for (; ; ++IterationNum) { 7500 if (IterationNum == NumIterations) 7501 return RetVal = CurrentIterVals[PN]; // Got exit value! 7502 7503 // Compute the value of the PHIs for the next iteration. 
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (auto &I : *Header) {
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    auto *StartCST = getOtherIncomingValue(PHI, Latch);
    if (!StartCST) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
7591 DenseMap<Instruction *, Constant *> NextIterVals; 7592 7593 // Create a list of which PHIs we need to compute. We want to do this before 7594 // calling EvaluateExpression on them because that may invalidate iterators 7595 // into CurrentIterVals. 7596 SmallVector<PHINode *, 8> PHIsToCompute; 7597 for (const auto &I : CurrentIterVals) { 7598 PHINode *PHI = dyn_cast<PHINode>(I.first); 7599 if (!PHI || PHI->getParent() != Header) continue; 7600 PHIsToCompute.push_back(PHI); 7601 } 7602 for (PHINode *PHI : PHIsToCompute) { 7603 Constant *&NextPHI = NextIterVals[PHI]; 7604 if (NextPHI) continue; // Already computed! 7605 7606 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7607 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7608 } 7609 CurrentIterVals.swap(NextIterVals); 7610 } 7611 7612 // Too many iterations were needed to evaluate. 7613 return getCouldNotCompute(); 7614 } 7615 7616 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7617 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7618 ValuesAtScopes[V]; 7619 // Check to see if we've folded this expression at this loop before. 7620 for (auto &LS : Values) 7621 if (LS.first == L) 7622 return LS.second ? LS.second : V; 7623 7624 Values.emplace_back(L, nullptr); 7625 7626 // Otherwise compute it. 7627 const SCEV *C = computeSCEVAtScope(V, L); 7628 for (auto &LS : reverse(ValuesAtScopes[V])) 7629 if (LS.first == L) { 7630 LS.second = C; 7631 break; 7632 } 7633 return C; 7634 } 7635 7636 /// This builds up a Constant using the ConstantExpr interface. That way, we 7637 /// will return Constants for objects which aren't represented by a 7638 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7639 /// Returns NULL if the SCEV isn't representable as a Constant. 7640 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7641 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7642 case scCouldNotCompute: 7643 case scAddRecExpr: 7644 break; 7645 case scConstant: 7646 return cast<SCEVConstant>(V)->getValue(); 7647 case scUnknown: 7648 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7649 case scSignExtend: { 7650 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7651 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7652 return ConstantExpr::getSExt(CastOp, SS->getType()); 7653 break; 7654 } 7655 case scZeroExtend: { 7656 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7657 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 7658 return ConstantExpr::getZExt(CastOp, SZ->getType()); 7659 break; 7660 } 7661 case scTruncate: { 7662 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 7663 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 7664 return ConstantExpr::getTrunc(CastOp, ST->getType()); 7665 break; 7666 } 7667 case scAddExpr: { 7668 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 7669 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 7670 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7671 unsigned AS = PTy->getAddressSpace(); 7672 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7673 C = ConstantExpr::getBitCast(C, DestPtrTy); 7674 } 7675 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 7676 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 7677 if (!C2) return nullptr; 7678 7679 // First pointer! 
7680 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 7681 unsigned AS = C2->getType()->getPointerAddressSpace(); 7682 std::swap(C, C2); 7683 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 7684 // The offsets have been converted to bytes. We can add bytes to an 7685 // i8* by GEP with the byte count in the first index. 7686 C = ConstantExpr::getBitCast(C, DestPtrTy); 7687 } 7688 7689 // Don't bother trying to sum two pointers. We probably can't 7690 // statically compute a load that results from it anyway. 7691 if (C2->getType()->isPointerTy()) 7692 return nullptr; 7693 7694 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 7695 if (PTy->getElementType()->isStructTy()) 7696 C2 = ConstantExpr::getIntegerCast( 7697 C2, Type::getInt32Ty(C->getContext()), true); 7698 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 7699 } else 7700 C = ConstantExpr::getAdd(C, C2); 7701 } 7702 return C; 7703 } 7704 break; 7705 } 7706 case scMulExpr: { 7707 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 7708 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 7709 // Don't bother with pointers at all. 7710 if (C->getType()->isPointerTy()) return nullptr; 7711 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 7712 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 7713 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 7714 C = ConstantExpr::getMul(C, C2); 7715 } 7716 return C; 7717 } 7718 break; 7719 } 7720 case scUDivExpr: { 7721 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 7722 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 7723 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 7724 if (LHS->getType() == RHS->getType()) 7725 return ConstantExpr::getUDiv(LHS, RHS); 7726 break; 7727 } 7728 case scSMaxExpr: 7729 case scUMaxExpr: 7730 break; // TODO: smax, umax. 7731 } 7732 return nullptr; 7733 } 7734 7735 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 7736 if (isa<SCEVConstant>(V)) return V; 7737 7738 // If this instruction is evolved from a constant-evolving PHI, compute the 7739 // exit value from the loop without using SCEVs. 7740 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 7741 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 7742 const Loop *LI = this->LI[I->getParent()]; 7743 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 7744 if (PHINode *PN = dyn_cast<PHINode>(I)) 7745 if (PN->getParent() == LI->getHeader()) { 7746 // Okay, there is no closed form solution for the PHI node. Check 7747 // to see if the loop that contains it has a known backedge-taken 7748 // count. If so, we may be able to force computation of the exit 7749 // value. 7750 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 7751 if (const SCEVConstant *BTCC = 7752 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 7753 7754 // This trivial case can show up in some degenerate cases where 7755 // the incoming IR has not yet been fully simplified. 
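              // For example (a hypothetical, already-rotated input): when the
              // guard proves the body runs exactly once, the backedge-taken
              // count is zero and the exit value of a header PHI is simply its
              // unique start value from outside the loop.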
              if (BTCC->getValue()->isZero()) {
                Value *InitValue = nullptr;
                bool MultipleInitValues = false;
                for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                  if (!LI->contains(PN->getIncomingBlock(i))) {
                    if (!InitValue)
                      InitValue = PN->getIncomingValue(i);
                    else if (InitValue != PN->getIncomingValue(i)) {
                      MultipleInitValues = true;
                      break;
                    }
                  }
                }
                // Fold to the start value only after every incoming edge from
                // outside the loop has been examined; returning from inside
                // the loop above could miss a second, conflicting init value.
                if (!MultipleInitValues && InitValue)
                  return getSCEV(InitValue);
              }
              // Okay, we know how many times the containing loop executes. If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
7844 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 7845 Comm->op_begin()+i); 7846 NewOps.push_back(OpAtScope); 7847 7848 for (++i; i != e; ++i) { 7849 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 7850 NewOps.push_back(OpAtScope); 7851 } 7852 if (isa<SCEVAddExpr>(Comm)) 7853 return getAddExpr(NewOps); 7854 if (isa<SCEVMulExpr>(Comm)) 7855 return getMulExpr(NewOps); 7856 if (isa<SCEVSMaxExpr>(Comm)) 7857 return getSMaxExpr(NewOps); 7858 if (isa<SCEVUMaxExpr>(Comm)) 7859 return getUMaxExpr(NewOps); 7860 llvm_unreachable("Unknown commutative SCEV type!"); 7861 } 7862 } 7863 // If we got here, all operands are loop invariant. 7864 return Comm; 7865 } 7866 7867 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 7868 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 7869 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 7870 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 7871 return Div; // must be loop invariant 7872 return getUDivExpr(LHS, RHS); 7873 } 7874 7875 // If this is a loop recurrence for a loop that does not contain L, then we 7876 // are dealing with the final value computed by the loop. 7877 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 7878 // First, attempt to evaluate each operand. 7879 // Avoid performing the look-up in the common case where the specified 7880 // expression has no loop-variant portions. 7881 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 7882 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 7883 if (OpAtScope == AddRec->getOperand(i)) 7884 continue; 7885 7886 // Okay, at least one of these operands is loop variant but might be 7887 // foldable. Build a new instance of the folded commutative expression. 7888 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 7889 AddRec->op_begin()+i); 7890 NewOps.push_back(OpAtScope); 7891 for (++i; i != e; ++i) 7892 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 7893 7894 const SCEV *FoldedRec = 7895 getAddRecExpr(NewOps, AddRec->getLoop(), 7896 AddRec->getNoWrapFlags(SCEV::FlagNW)); 7897 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 7898 // The addrec may be folded to a nonrecurrence, for example, if the 7899 // induction variable is multiplied by zero after constant folding. Go 7900 // ahead and return the folded value. 7901 if (!AddRec) 7902 return FoldedRec; 7903 break; 7904 } 7905 7906 // If the scope is outside the addrec's loop, evaluate it by using the 7907 // loop exit value of the addrec. 7908 if (!AddRec->getLoop()->contains(L)) { 7909 // To evaluate this recurrence, we need to know how many times the AddRec 7910 // loop iterates. Compute this now. 7911 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 7912 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 7913 7914 // Then, evaluate the AddRec. 
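      // For an affine recurrence this amounts to Start + Step * BTC; as an
      // illustrative example (not tied to any particular input), {5,+,3}
      // with a backedge-taken count of 4 evaluates to 5 + 3*4 = 17.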
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2); // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //    I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //    (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns either the two roots (which might be the same),
/// or None if the coefficients are not constants or the roots cannot be
/// computed.
static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.

  // The A coefficient is N/2.
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2.
  APInt B = M;
  B -= A; // A is the same as N/2.

  // The C coefficient is L.
  const APInt &C = L;

  // Compute the B^2-4ac term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know and take advantage of the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant, the answer is immediate.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
8090 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8091 if (auto Roots = SolveQuadraticEquation(AddRec, *this)) { 8092 const SCEVConstant *R1 = Roots->first; 8093 const SCEVConstant *R2 = Roots->second; 8094 // Pick the smallest positive root value. 8095 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 8096 CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 8097 if (!CB->getZExtValue()) 8098 std::swap(R1, R2); // R1 is the minimum root now. 8099 8100 // We can only use this value if the chrec ends up with an exact zero 8101 // value at this index. When solving for "X*X != 5", for example, we 8102 // should not accept a root of 2. 8103 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 8104 if (Val->isZero()) 8105 // We found a quadratic root! 8106 return ExitLimit(R1, R1, false, Predicates); 8107 } 8108 } 8109 return getCouldNotCompute(); 8110 } 8111 8112 // Otherwise we can only handle this if it is affine. 8113 if (!AddRec->isAffine()) 8114 return getCouldNotCompute(); 8115 8116 // If this is an affine expression, the execution count of this branch is 8117 // the minimum unsigned root of the following equation: 8118 // 8119 // Start + Step*N = 0 (mod 2^BW) 8120 // 8121 // equivalent to: 8122 // 8123 // Step*N = -Start (mod 2^BW) 8124 // 8125 // where BW is the common bit width of Start and Step. 8126 8127 // Get the initial value for the loop. 8128 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8129 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8130 8131 // For now we handle only constant steps. 8132 // 8133 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8134 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8135 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8136 // We have not yet seen any such cases. 8137 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8138 if (!StepC || StepC->getValue()->isZero()) 8139 return getCouldNotCompute(); 8140 8141 // For positive steps (counting up until unsigned overflow): 8142 // N = -Start/Step (as unsigned) 8143 // For negative steps (counting down to zero): 8144 // N = Start/-Step 8145 // First compute the unsigned distance from zero in the direction of Step. 8146 bool CountDown = StepC->getAPInt().isNegative(); 8147 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8148 8149 // Handle unitary steps, which cannot wraparound. 8150 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8151 // N = Distance (as unsigned) 8152 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8153 APInt MaxBECount = getUnsignedRangeMax(Distance); 8154 8155 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8156 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8157 // case, and see if we can improve the bound. 8158 // 8159 // Explicitly handling this here is necessary because getUnsignedRange 8160 // isn't context-sensitive; it doesn't know that we only care about the 8161 // range inside the loop. 8162 const SCEV *Zero = getZero(Distance->getType()); 8163 const SCEV *One = getOne(Distance->getType()); 8164 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8165 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8166 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8167 // as "unsigned_max(Distance + 1) - 1". 
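    // As an illustrative sketch: if the original exit test was "i != n" and
    // the rotated loop is entered only when "n != 0", then Distance is n - 1
    // and Distance + 1 is n; umax(n) - 1 is a tighter bound than the
    // conservative umax(n - 1), which is UINT_MAX since n - 1 can wrap.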
8168 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8169 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8170 } 8171 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8172 } 8173 8174 // If the condition controls loop exit (the loop exits only if the expression 8175 // is true) and the addition is no-wrap we can use unsigned divide to 8176 // compute the backedge count. In this case, the step may not divide the 8177 // distance, but we don't care because if the condition is "missed" the loop 8178 // will have undefined behavior due to wrapping. 8179 if (ControlsExit && AddRec->hasNoSelfWrap() && 8180 loopHasNoAbnormalExits(AddRec->getLoop())) { 8181 const SCEV *Exact = 8182 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8183 const SCEV *Max = 8184 Exact == getCouldNotCompute() 8185 ? Exact 8186 : getConstant(getUnsignedRangeMax(Exact)); 8187 return ExitLimit(Exact, Max, false, Predicates); 8188 } 8189 8190 // Solve the general equation. 8191 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8192 getNegativeSCEV(Start), *this); 8193 const SCEV *M = E == getCouldNotCompute() 8194 ? E 8195 : getConstant(getUnsignedRangeMax(E)); 8196 return ExitLimit(E, M, false, Predicates); 8197 } 8198 8199 ScalarEvolution::ExitLimit 8200 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8201 // Loops that look like: while (X == 0) are very strange indeed. We don't 8202 // handle them yet except for the trivial case. This could be expanded in the 8203 // future as needed. 8204 8205 // If the value is a constant, check to see if it is known to be non-zero 8206 // already. If so, the backedge will execute zero times. 8207 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8208 if (!C->getValue()->isZero()) 8209 return getZero(C->getType()); 8210 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8211 } 8212 8213 // We could implement others, but I really doubt anyone writes loops like 8214 // this, and if they did, they would already be constant folded. 8215 return getCouldNotCompute(); 8216 } 8217 8218 std::pair<BasicBlock *, BasicBlock *> 8219 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8220 // If the block has a unique predecessor, then there is no path from the 8221 // predecessor to the block that does not go through the direct edge 8222 // from the predecessor to the block. 8223 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8224 return {Pred, BB}; 8225 8226 // A loop's header is defined to be a block that dominates the loop. 8227 // If the header has a unique predecessor outside the loop, it must be 8228 // a block that has exactly one successor that can reach the loop. 8229 if (Loop *L = LI.getLoopFor(BB)) 8230 return {L->getLoopPredecessor(), L->getHeader()}; 8231 8232 return {nullptr, nullptr}; 8233 } 8234 8235 /// SCEV structural equivalence is usually sufficient for testing whether two 8236 /// expressions are equal, however for the purposes of looking for a condition 8237 /// guarding a loop, it can be useful to be a little more general, since a 8238 /// front-end may have replicated the controlling expression. 8239 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8240 // Quick check to see if they are the same SCEV. 8241 if (A == B) return true; 8242 8243 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8244 // Not all instructions that are "identical" compute the same value. 
  // For instance, two distinct alloca instructions allocating the same type
  // are identical and do not read memory, but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
8333 if (!RA) 8334 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8335 if (const SCEVMulExpr *ME = 8336 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8337 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8338 ME->getOperand(0)->isAllOnesValue()) { 8339 RHS = AE->getOperand(1); 8340 LHS = ME->getOperand(1); 8341 Changed = true; 8342 } 8343 break; 8344 8345 8346 // The "Should have been caught earlier!" messages refer to the fact 8347 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8348 // should have fired on the corresponding cases, and canonicalized the 8349 // check to trivially_true or trivially_false. 8350 8351 case ICmpInst::ICMP_UGE: 8352 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8353 Pred = ICmpInst::ICMP_UGT; 8354 RHS = getConstant(RA - 1); 8355 Changed = true; 8356 break; 8357 case ICmpInst::ICMP_ULE: 8358 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8359 Pred = ICmpInst::ICMP_ULT; 8360 RHS = getConstant(RA + 1); 8361 Changed = true; 8362 break; 8363 case ICmpInst::ICMP_SGE: 8364 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8365 Pred = ICmpInst::ICMP_SGT; 8366 RHS = getConstant(RA - 1); 8367 Changed = true; 8368 break; 8369 case ICmpInst::ICMP_SLE: 8370 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8371 Pred = ICmpInst::ICMP_SLT; 8372 RHS = getConstant(RA + 1); 8373 Changed = true; 8374 break; 8375 } 8376 } 8377 } 8378 8379 // Check for obvious equality. 8380 if (HasSameValue(LHS, RHS)) { 8381 if (ICmpInst::isTrueWhenEqual(Pred)) 8382 goto trivially_true; 8383 if (ICmpInst::isFalseWhenEqual(Pred)) 8384 goto trivially_false; 8385 } 8386 8387 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8388 // adding or subtracting 1 from one of the operands. 
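  // For example (illustrative only): "X s<= Y" becomes "X s< Y + 1" when Y's
  // signed maximum is provably below INT_MAX, or "X + (-1) s< Y" when X's
  // signed minimum is provably above INT_MIN; either rewrite is safe because
  // the adjusted operand cannot wrap.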
8389 switch (Pred) { 8390 case ICmpInst::ICMP_SLE: 8391 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8392 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8393 SCEV::FlagNSW); 8394 Pred = ICmpInst::ICMP_SLT; 8395 Changed = true; 8396 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8397 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8398 SCEV::FlagNSW); 8399 Pred = ICmpInst::ICMP_SLT; 8400 Changed = true; 8401 } 8402 break; 8403 case ICmpInst::ICMP_SGE: 8404 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8405 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8406 SCEV::FlagNSW); 8407 Pred = ICmpInst::ICMP_SGT; 8408 Changed = true; 8409 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8410 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8411 SCEV::FlagNSW); 8412 Pred = ICmpInst::ICMP_SGT; 8413 Changed = true; 8414 } 8415 break; 8416 case ICmpInst::ICMP_ULE: 8417 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8418 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8419 SCEV::FlagNUW); 8420 Pred = ICmpInst::ICMP_ULT; 8421 Changed = true; 8422 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8423 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8424 Pred = ICmpInst::ICMP_ULT; 8425 Changed = true; 8426 } 8427 break; 8428 case ICmpInst::ICMP_UGE: 8429 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8430 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8431 Pred = ICmpInst::ICMP_UGT; 8432 Changed = true; 8433 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8434 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8435 SCEV::FlagNUW); 8436 Pred = ICmpInst::ICMP_UGT; 8437 Changed = true; 8438 } 8439 break; 8440 default: 8441 break; 8442 } 8443 8444 // TODO: More simplifications are possible here. 8445 8446 // Recursively simplify until we either hit a recursion limit or nothing 8447 // changes. 8448 if (Changed) 8449 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 8450 8451 return Changed; 8452 8453 trivially_true: 8454 // Return 0 == 0. 8455 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8456 Pred = ICmpInst::ICMP_EQ; 8457 return true; 8458 8459 trivially_false: 8460 // Return 0 != 0. 8461 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8462 Pred = ICmpInst::ICMP_NE; 8463 return true; 8464 } 8465 8466 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 8467 return getSignedRangeMax(S).isNegative(); 8468 } 8469 8470 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 8471 return getSignedRangeMin(S).isStrictlyPositive(); 8472 } 8473 8474 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 8475 return !getSignedRangeMin(S).isNegative(); 8476 } 8477 8478 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 8479 return !getSignedRangeMax(S).isStrictlyPositive(); 8480 } 8481 8482 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 8483 return isKnownNegative(S) || isKnownPositive(S); 8484 } 8485 8486 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 8487 const SCEV *LHS, const SCEV *RHS) { 8488 // Canonicalize the inputs first. 8489 (void)SimplifyICmpOperands(Pred, LHS, RHS); 8490 8491 // If LHS or RHS is an addrec, check to see if the condition is true in 8492 // every iteration of the loop. 8493 // If LHS and RHS are both addrec, both conditions must be true in 8494 // every iteration of the loop. 
8495 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 8496 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 8497 bool LeftGuarded = false; 8498 bool RightGuarded = false; 8499 if (LAR) { 8500 const Loop *L = LAR->getLoop(); 8501 if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) && 8502 isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) { 8503 if (!RAR) return true; 8504 LeftGuarded = true; 8505 } 8506 } 8507 if (RAR) { 8508 const Loop *L = RAR->getLoop(); 8509 if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) && 8510 isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) { 8511 if (!LAR) return true; 8512 RightGuarded = true; 8513 } 8514 } 8515 if (LeftGuarded && RightGuarded) 8516 return true; 8517 8518 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 8519 return true; 8520 8521 // Otherwise see what can be done with known constant ranges. 8522 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS); 8523 } 8524 8525 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 8526 ICmpInst::Predicate Pred, 8527 bool &Increasing) { 8528 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 8529 8530 #ifndef NDEBUG 8531 // Verify an invariant: inverting the predicate should turn a monotonically 8532 // increasing change to a monotonically decreasing one, and vice versa. 8533 bool IncreasingSwapped; 8534 bool ResultSwapped = isMonotonicPredicateImpl( 8535 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 8536 8537 assert(Result == ResultSwapped && "should be able to analyze both!"); 8538 if (ResultSwapped) 8539 assert(Increasing == !IncreasingSwapped && 8540 "monotonicity should flip as we flip the predicate"); 8541 #endif 8542 8543 return Result; 8544 } 8545 8546 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 8547 ICmpInst::Predicate Pred, 8548 bool &Increasing) { 8549 8550 // A zero step value for LHS means the induction variable is essentially a 8551 // loop invariant value. We don't really depend on the predicate actually 8552 // flipping from false to true (for increasing predicates, and the other way 8553 // around for decreasing predicates), all we care about is that *if* the 8554 // predicate changes then it only changes from false to true. 8555 // 8556 // A zero step value in itself is not very useful, but there may be places 8557 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 8558 // as general as possible. 
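  // As an illustrative example: for LHS = {0,+,1}<nuw> and Pred = ICMP_UGT,
  // "LHS u> RHS" can only change from false to true as the recurrence grows,
  // so the predicate is monotonically increasing; with ICMP_ULT it would be
  // monotonically decreasing instead.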
8559 8560 switch (Pred) { 8561 default: 8562 return false; // Conservative answer 8563 8564 case ICmpInst::ICMP_UGT: 8565 case ICmpInst::ICMP_UGE: 8566 case ICmpInst::ICMP_ULT: 8567 case ICmpInst::ICMP_ULE: 8568 if (!LHS->hasNoUnsignedWrap()) 8569 return false; 8570 8571 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 8572 return true; 8573 8574 case ICmpInst::ICMP_SGT: 8575 case ICmpInst::ICMP_SGE: 8576 case ICmpInst::ICMP_SLT: 8577 case ICmpInst::ICMP_SLE: { 8578 if (!LHS->hasNoSignedWrap()) 8579 return false; 8580 8581 const SCEV *Step = LHS->getStepRecurrence(*this); 8582 8583 if (isKnownNonNegative(Step)) { 8584 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 8585 return true; 8586 } 8587 8588 if (isKnownNonPositive(Step)) { 8589 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 8590 return true; 8591 } 8592 8593 return false; 8594 } 8595 8596 } 8597 8598 llvm_unreachable("switch has default clause!"); 8599 } 8600 8601 bool ScalarEvolution::isLoopInvariantPredicate( 8602 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 8603 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 8604 const SCEV *&InvariantRHS) { 8605 8606 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 8607 if (!isLoopInvariant(RHS, L)) { 8608 if (!isLoopInvariant(LHS, L)) 8609 return false; 8610 8611 std::swap(LHS, RHS); 8612 Pred = ICmpInst::getSwappedPredicate(Pred); 8613 } 8614 8615 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 8616 if (!ArLHS || ArLHS->getLoop() != L) 8617 return false; 8618 8619 bool Increasing; 8620 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 8621 return false; 8622 8623 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 8624 // true as the loop iterates, and the backedge is control dependent on 8625 // "ArLHS `Pred` RHS" == true then we can reason as follows: 8626 // 8627 // * if the predicate was false in the first iteration then the predicate 8628 // is never evaluated again, since the loop exits without taking the 8629 // backedge. 8630 // * if the predicate was true in the first iteration then it will 8631 // continue to be true for all future iterations since it is 8632 // monotonically increasing. 8633 // 8634 // For both the above possibilities, we can replace the loop varying 8635 // predicate with its value on the first iteration of the loop (which is 8636 // loop invariant). 8637 // 8638 // A similar reasoning applies for a monotonically decreasing predicate, by 8639 // replacing true with false and false with true in the above two bullets. 8640 8641 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 8642 8643 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 8644 return false; 8645 8646 InvariantPred = Pred; 8647 InvariantLHS = ArLHS->getStart(); 8648 InvariantRHS = RHS; 8649 return true; 8650 } 8651 8652 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 8653 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 8654 if (HasSameValue(LHS, RHS)) 8655 return ICmpInst::isTrueWhenEqual(Pred); 8656 8657 // This code is split out from isKnownPredicate because it is called from 8658 // within isLoopEntryGuardedByCond. 
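  // The idea, sketched with made-up ranges: if the unsigned range of LHS is
  // known to be [0, 8) and that of RHS is [8, 16), every possible LHS value
  // is u< every possible RHS value, so ICMP_ULT is answerable from the
  // ranges alone.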

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *NonConstOp, *ConstOp;
    SCEV::NoWrapFlags FlagsPresent;

    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
      return false;

    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
  };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
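  // Illustrative sketch of the split: to show I u< L for I = {0,+,1}<nsw>
  // and a bound L known to be non-negative, it suffices to show 0 s<= I and
  // I s< L; each signed fact is often provable separately even when the
  // unsigned fact is not directly known.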
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times. This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree. These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
8842 if (!DT.isReachableFromEntry(L->getHeader())) 8843 return false; 8844 8845 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 8846 return true; 8847 8848 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 8849 DTN != HeaderDTN; DTN = DTN->getIDom()) { 8850 assert(DTN && "should reach the loop header before reaching the root!"); 8851 8852 BasicBlock *BB = DTN->getBlock(); 8853 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 8854 return true; 8855 8856 BasicBlock *PBB = BB->getSinglePredecessor(); 8857 if (!PBB) 8858 continue; 8859 8860 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 8861 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 8862 continue; 8863 8864 Value *Condition = ContinuePredicate->getCondition(); 8865 8866 // If we have an edge `E` within the loop body that dominates the only 8867 // latch, the condition guarding `E` also guards the backedge. This 8868 // reasoning works only for loops with a single latch. 8869 8870 BasicBlockEdge DominatingEdge(PBB, BB); 8871 if (DominatingEdge.isSingleEdge()) { 8872 // We're constructively (and conservatively) enumerating edges within the 8873 // loop body that dominate the latch. The dominator tree better agree 8874 // with us on this: 8875 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 8876 8877 if (isImpliedCond(Pred, LHS, RHS, Condition, 8878 BB != ContinuePredicate->getSuccessor(0))) 8879 return true; 8880 } 8881 } 8882 8883 return false; 8884 } 8885 8886 bool 8887 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 8888 ICmpInst::Predicate Pred, 8889 const SCEV *LHS, const SCEV *RHS) { 8890 // Interpret a null as meaning no loop, where there is obviously no guard 8891 // (interprocedural conditions notwithstanding). 8892 if (!L) return false; 8893 8894 if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS)) 8895 return true; 8896 8897 // Starting at the loop predecessor, climb up the predecessor chain, as long 8898 // as there are predecessors that can be found that have unique successors 8899 // leading to the original header. 8900 for (std::pair<BasicBlock *, BasicBlock *> 8901 Pair(L->getLoopPredecessor(), L->getHeader()); 8902 Pair.first; 8903 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 8904 8905 if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) 8906 return true; 8907 8908 BranchInst *LoopEntryPredicate = 8909 dyn_cast<BranchInst>(Pair.first->getTerminator()); 8910 if (!LoopEntryPredicate || 8911 LoopEntryPredicate->isUnconditional()) 8912 continue; 8913 8914 if (isImpliedCond(Pred, LHS, RHS, 8915 LoopEntryPredicate->getCondition(), 8916 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 8917 return true; 8918 } 8919 8920 // Check conditions due to any @llvm.assume intrinsics. 8921 for (auto &AssumeVH : AC.assumptions()) { 8922 if (!AssumeVH) 8923 continue; 8924 auto *CI = cast<CallInst>(AssumeVH); 8925 if (!DT.dominates(CI, L->getHeader())) 8926 continue; 8927 8928 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 8929 return true; 8930 } 8931 8932 return false; 8933 } 8934 8935 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 8936 const SCEV *LHS, const SCEV *RHS, 8937 Value *FoundCondValue, 8938 bool Inverse) { 8939 if (!PendingLoopPredicates.insert(FoundCondValue).second) 8940 return false; 8941 8942 auto ClearOnExit = 8943 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 8944 8945 // Recursively handle And and Or conditions. 
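  // For instance, if the dominating branch took the true edge of
  // "and i1 %a, %b", both %a and %b are known true, so it suffices for either
  // operand alone to imply the goal; dually, the false edge of an "or" makes
  // both operands known false.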
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
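  // (With the sign bit clear, the two orderings coincide; e.g. on i8,
  // 5 u< 100 and 5 s< 100 agree, whereas -1 u< 100 is false but
  // -1 s< 100 is true.)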
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin.  If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min).  We know from the guarding condition that !(V
        // == Min).  This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        LLVM_FALLTHROUGH;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is more than sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e., is called many times).
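  //
  // A couple of illustrative cases (hypothetical values, shown only to make
  // the contract concrete): computeConstantDifference((%x + 13), %x) == 13,
  // and for the add recurrences {6,+,3} and {4,+,3} over the same loop the
  // starts are compared, giving 2.  When no constant difference can be
  // established syntactically, None is returned.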
9135 9136 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9137 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9138 const auto *MAR = cast<SCEVAddRecExpr>(More); 9139 9140 if (LAR->getLoop() != MAR->getLoop()) 9141 return None; 9142 9143 // We look at affine expressions only; not for correctness but to keep 9144 // getStepRecurrence cheap. 9145 if (!LAR->isAffine() || !MAR->isAffine()) 9146 return None; 9147 9148 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9149 return None; 9150 9151 Less = LAR->getStart(); 9152 More = MAR->getStart(); 9153 9154 // fall through 9155 } 9156 9157 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9158 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9159 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9160 return M - L; 9161 } 9162 9163 const SCEV *L, *R; 9164 SCEV::NoWrapFlags Flags; 9165 if (splitBinaryAdd(Less, L, R, Flags)) 9166 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 9167 if (R == More) 9168 return -(LC->getAPInt()); 9169 9170 if (splitBinaryAdd(More, L, R, Flags)) 9171 if (const auto *LC = dyn_cast<SCEVConstant>(L)) 9172 if (R == Less) 9173 return LC->getAPInt(); 9174 9175 return None; 9176 } 9177 9178 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9179 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9180 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9181 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9182 return false; 9183 9184 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9185 if (!AddRecLHS) 9186 return false; 9187 9188 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9189 if (!AddRecFoundLHS) 9190 return false; 9191 9192 // We'd like to let SCEV reason about control dependencies, so we constrain 9193 // both the inequalities to be about add recurrences on the same loop. This 9194 // way we can use isLoopEntryGuardedByCond later. 9195 9196 const Loop *L = AddRecFoundLHS->getLoop(); 9197 if (L != AddRecLHS->getLoop()) 9198 return false; 9199 9200 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9201 // 9202 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9203 // ... (2) 9204 // 9205 // Informal proof for (2), assuming (1) [*]: 9206 // 9207 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9208 // 9209 // Then 9210 // 9211 // FoundLHS s< FoundRHS s< INT_MIN - C 9212 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9213 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9214 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9215 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9216 // <=> FoundLHS + C s< FoundRHS + C 9217 // 9218 // [*]: (1) can be proved by ruling out overflow. 9219 // 9220 // [**]: This can be proved by analyzing all the four possibilities: 9221 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9222 // (A s>= 0, B s>= 0). 9223 // 9224 // Note: 9225 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9226 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9227 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9228 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9229 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9230 // C)". 
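  //
  // As a concrete (hypothetical) instance of rule (1): to derive
  // "(i + 1) u< (len + 1)" from "i u< len" we have C = 1, and it suffices to
  // show len u< -1, i.e. that len is not UINT_MAX.  That is exactly the
  // isLoopEntryGuardedByCond query issued below, with
  // FoundRHSLimit = -RDiff = -1.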
9231 9232 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9233 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9234 if (!LDiff || !RDiff || *LDiff != *RDiff) 9235 return false; 9236 9237 if (LDiff->isMinValue()) 9238 return true; 9239 9240 APInt FoundRHSLimit; 9241 9242 if (Pred == CmpInst::ICMP_ULT) { 9243 FoundRHSLimit = -(*RDiff); 9244 } else { 9245 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9246 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9247 } 9248 9249 // Try to prove (1) or (2), as needed. 9250 return isLoopEntryGuardedByCond(L, Pred, FoundRHS, 9251 getConstant(FoundRHSLimit)); 9252 } 9253 9254 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 9255 const SCEV *LHS, const SCEV *RHS, 9256 const SCEV *FoundLHS, 9257 const SCEV *FoundRHS) { 9258 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9259 return true; 9260 9261 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9262 return true; 9263 9264 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 9265 FoundLHS, FoundRHS) || 9266 // ~x < ~y --> x > y 9267 isImpliedCondOperandsHelper(Pred, LHS, RHS, 9268 getNotSCEV(FoundRHS), 9269 getNotSCEV(FoundLHS)); 9270 } 9271 9272 /// If Expr computes ~A, return A else return nullptr 9273 static const SCEV *MatchNotExpr(const SCEV *Expr) { 9274 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 9275 if (!Add || Add->getNumOperands() != 2 || 9276 !Add->getOperand(0)->isAllOnesValue()) 9277 return nullptr; 9278 9279 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 9280 if (!AddRHS || AddRHS->getNumOperands() != 2 || 9281 !AddRHS->getOperand(0)->isAllOnesValue()) 9282 return nullptr; 9283 9284 return AddRHS->getOperand(1); 9285 } 9286 9287 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 9288 template<typename MaxExprType> 9289 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 9290 const SCEV *Candidate) { 9291 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 9292 if (!MaxExpr) return false; 9293 9294 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 9295 } 9296 9297 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 9298 template<typename MaxExprType> 9299 static bool IsMinConsistingOf(ScalarEvolution &SE, 9300 const SCEV *MaybeMinExpr, 9301 const SCEV *Candidate) { 9302 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 9303 if (!MaybeMaxExpr) 9304 return false; 9305 9306 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 9307 } 9308 9309 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 9310 ICmpInst::Predicate Pred, 9311 const SCEV *LHS, const SCEV *RHS) { 9312 // If both sides are affine addrecs for the same loop, with equal 9313 // steps, and we know the recurrences don't wrap, then we only 9314 // need to check the predicate on the starting values. 9315 9316 if (!ICmpInst::isRelational(Pred)) 9317 return false; 9318 9319 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 9320 if (!LAR) 9321 return false; 9322 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 9323 if (!RAR) 9324 return false; 9325 if (LAR->getLoop() != RAR->getLoop()) 9326 return false; 9327 if (!LAR->isAffine() || !RAR->isAffine()) 9328 return false; 9329 9330 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 9331 return false; 9332 9333 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 
      SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaSimpleReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which would
      // then be cached as SCEVCouldNotCompute to avoid infinite recursion. To
      // avoid this, we only want to create SCEVs that are constants in this
      // section. So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  return false;
}

bool
ScalarEvolution::isKnownViaSimpleReasoning(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaSimpleReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaSimpleReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
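  //
  // For instance (a made-up example to illustrate the mechanics): with the
  // antecedent "FoundLHS s< 8" we get FoundLHSRange = [INT_MIN, 8).  If
  // Addend is 2, LHSRange below becomes [INT_MIN + 2, 10), and a consequent
  // such as "LHS s< 10" (SatisfyingLHSRange = [INT_MIN, 10)) contains it, so
  // the implication holds.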
9589 ConstantRange FoundLHSRange = 9590 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 9591 9592 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 9593 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 9594 9595 // We can also compute the range of values for `LHS` that satisfy the 9596 // consequent, "`LHS` `Pred` `RHS`": 9597 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 9598 ConstantRange SatisfyingLHSRange = 9599 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 9600 9601 // The antecedent implies the consequent if every value of `LHS` that 9602 // satisfies the antecedent also satisfies the consequent. 9603 return SatisfyingLHSRange.contains(LHSRange); 9604 } 9605 9606 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 9607 bool IsSigned, bool NoWrap) { 9608 assert(isKnownPositive(Stride) && "Positive stride expected!"); 9609 9610 if (NoWrap) return false; 9611 9612 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9613 const SCEV *One = getOne(Stride->getType()); 9614 9615 if (IsSigned) { 9616 APInt MaxRHS = getSignedRangeMax(RHS); 9617 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 9618 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9619 9620 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 9621 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 9622 } 9623 9624 APInt MaxRHS = getUnsignedRangeMax(RHS); 9625 APInt MaxValue = APInt::getMaxValue(BitWidth); 9626 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9627 9628 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 9629 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 9630 } 9631 9632 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 9633 bool IsSigned, bool NoWrap) { 9634 if (NoWrap) return false; 9635 9636 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 9637 const SCEV *One = getOne(Stride->getType()); 9638 9639 if (IsSigned) { 9640 APInt MinRHS = getSignedRangeMin(RHS); 9641 APInt MinValue = APInt::getSignedMinValue(BitWidth); 9642 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 9643 9644 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 9645 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 9646 } 9647 9648 APInt MinRHS = getUnsignedRangeMin(RHS); 9649 APInt MinValue = APInt::getMinValue(BitWidth); 9650 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 9651 9652 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 9653 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 9654 } 9655 9656 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 9657 bool Equality) { 9658 const SCEV *One = getOne(Step->getType()); 9659 Delta = Equality ? 
          getAddExpr(Delta, Step)
        : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV < Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop has a single exit and no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single trip loop. The backedge taken count formula reduces to zero in
    // this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero,
    // otherwise we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in C.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where
  // Start is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count:
  // if the backedge is taken at least once, max(End,Start) is End and the
  // result is as above, and if not, max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    // Calculate the maximum backedge count based on the range of values
    // permitted by Start, End, and Stride.
    APInt MinStart = IsSigned ? getSignedRangeMin(Start)
                              : getUnsignedRangeMin(Start);

    unsigned BitWidth = getTypeSizeInBits(LHS->getType());

    APInt StrideForMaxBECount;

    if (PositiveStride)
      StrideForMaxBECount =
          IsSigned ? getSignedRangeMin(Stride)
                   : getUnsignedRangeMin(Stride);
    else
      // Using a stride of 1 is safe when computing max backedge taken count
      // for a loop with unknown stride.
      StrideForMaxBECount = APInt(BitWidth, 1, IsSigned);

    APInt Limit =
        IsSigned ? APInt::getSignedMaxValue(BitWidth) -
                       (StrideForMaxBECount - 1)
                 : APInt::getMaxValue(BitWidth) - (StrideForMaxBECount - 1);

    // Although End can be a MAX expression we estimate MaxEnd considering
    // only the case End = RHS. This is safe because in the other case
    // (End - Start) is zero, leading to a zero maximum backedge taken count.
    APInt MaxEnd =
        IsSigned ? APIntOps::smin(getSignedRangeMax(RHS), Limit)
                 : APIntOps::umin(getUnsignedRangeMax(RHS), Limit);

    MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
                                getConstant(StrideForMaxBECount), false);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride,
                                       false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ?
APIntOps::smax(getSignedRangeMin(RHS), Limit) 9887 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 9888 9889 9890 const SCEV *MaxBECount = getCouldNotCompute(); 9891 if (isa<SCEVConstant>(BECount)) 9892 MaxBECount = BECount; 9893 else 9894 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 9895 getConstant(MinStride), false); 9896 9897 if (isa<SCEVCouldNotCompute>(MaxBECount)) 9898 MaxBECount = BECount; 9899 9900 return ExitLimit(BECount, MaxBECount, false, Predicates); 9901 } 9902 9903 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 9904 ScalarEvolution &SE) const { 9905 if (Range.isFullSet()) // Infinite loop. 9906 return SE.getCouldNotCompute(); 9907 9908 // If the start is a non-zero constant, shift the range to simplify things. 9909 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 9910 if (!SC->getValue()->isZero()) { 9911 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 9912 Operands[0] = SE.getZero(SC->getType()); 9913 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 9914 getNoWrapFlags(FlagNW)); 9915 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 9916 return ShiftedAddRec->getNumIterationsInRange( 9917 Range.subtract(SC->getAPInt()), SE); 9918 // This is strange and shouldn't happen. 9919 return SE.getCouldNotCompute(); 9920 } 9921 9922 // The only time we can solve this is when we have all constant indices. 9923 // Otherwise, we cannot determine the overflow conditions. 9924 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 9925 return SE.getCouldNotCompute(); 9926 9927 // Okay at this point we know that all elements of the chrec are constants and 9928 // that the start element is zero. 9929 9930 // First check to see if the range contains zero. If not, the first 9931 // iteration exits. 9932 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 9933 if (!Range.contains(APInt(BitWidth, 0))) 9934 return SE.getZero(getType()); 9935 9936 if (isAffine()) { 9937 // If this is an affine expression then we have this situation: 9938 // Solve {0,+,A} in Range === Ax in Range 9939 9940 // We know that zero is in the range. If A is positive then we know that 9941 // the upper value of the range must be the first possible exit value. 9942 // If A is negative then the lower of the range is the last possible loop 9943 // value. Also note that we already checked for a full range. 9944 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 9945 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 9946 9947 // The exit value should be (End+A)/A. 9948 APInt ExitVal = (End + A).udiv(A); 9949 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 9950 9951 // Evaluate at the exit value. If we really did fall out of the valid 9952 // range, then we computed our trip count, otherwise wrap around or other 9953 // things must have happened. 9954 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 9955 if (Range.contains(Val->getValue())) 9956 return SE.getCouldNotCompute(); // Something strange happened 9957 9958 // Ensure that the previous value is in the range. This is a sanity check. 
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it. To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap);

    // Next, solve the constructed addrec.
    if (auto Roots =
            SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2); // R1 is the minimum root now.

        // Make sure the root is not off by one. The returned iteration should
        // not be in the range, but the previous one should be. When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val =
            EvaluateConstantChrecAtConstant(this, R1->getValue(), SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
              ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute(); // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value. Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute(); // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
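// (SCEVSignExtendExpr is collected as well; see follow() below.)  For
// example, walking a stride such as (4 * %n * {0,+,1}<%loop>) records the
// multiply itself as a single term, while a bare %m is recorded as a
// SCEVUnknown term.  (Illustrative only; the exact terms depend on how the
// expression was folded.)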
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRecExpr, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
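///
/// For instance (illustrative, in the notation of the comment on
/// SCEVCollectAddRecMultiplies above): for {%a,+,(8 * %m * %o)}<%loop>,
/// place 1) collects the stride (8 * %m * %o) as a term, and place 2) would
/// additionally catch parameters such as %p * %q multiplied with an
/// expression containing the AddRec.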
10151 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10152 SmallVectorImpl<const SCEV *> &Terms) { 10153 SmallVector<const SCEV *, 4> Strides; 10154 SCEVCollectStrides StrideCollector(*this, Strides); 10155 visitAll(Expr, StrideCollector); 10156 10157 DEBUG({ 10158 dbgs() << "Strides:\n"; 10159 for (const SCEV *S : Strides) 10160 dbgs() << *S << "\n"; 10161 }); 10162 10163 for (const SCEV *S : Strides) { 10164 SCEVCollectTerms TermCollector(Terms); 10165 visitAll(S, TermCollector); 10166 } 10167 10168 DEBUG({ 10169 dbgs() << "Terms:\n"; 10170 for (const SCEV *T : Terms) 10171 dbgs() << *T << "\n"; 10172 }); 10173 10174 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10175 visitAll(Expr, MulCollector); 10176 } 10177 10178 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10179 SmallVectorImpl<const SCEV *> &Terms, 10180 SmallVectorImpl<const SCEV *> &Sizes) { 10181 int Last = Terms.size() - 1; 10182 const SCEV *Step = Terms[Last]; 10183 10184 // End of recursion. 10185 if (Last == 0) { 10186 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10187 SmallVector<const SCEV *, 2> Qs; 10188 for (const SCEV *Op : M->operands()) 10189 if (!isa<SCEVConstant>(Op)) 10190 Qs.push_back(Op); 10191 10192 Step = SE.getMulExpr(Qs); 10193 } 10194 10195 Sizes.push_back(Step); 10196 return true; 10197 } 10198 10199 for (const SCEV *&Term : Terms) { 10200 // Normalize the terms before the next call to findArrayDimensionsRec. 10201 const SCEV *Q, *R; 10202 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10203 10204 // Bail out when GCD does not evenly divide one of the terms. 10205 if (!R->isZero()) 10206 return false; 10207 10208 Term = Q; 10209 } 10210 10211 // Remove all SCEVConstants. 10212 Terms.erase( 10213 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10214 Terms.end()); 10215 10216 if (Terms.size() > 0) 10217 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10218 return false; 10219 10220 Sizes.push_back(Step); 10221 return true; 10222 } 10223 10224 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10225 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 10226 for (const SCEV *T : Terms) 10227 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 10228 return true; 10229 return false; 10230 } 10231 10232 // Return the number of product terms in S. 10233 static inline int numberOfTerms(const SCEV *S) { 10234 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 10235 return Expr->getNumOperands(); 10236 return 1; 10237 } 10238 10239 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 10240 if (isa<SCEVConstant>(T)) 10241 return nullptr; 10242 10243 if (isa<SCEVUnknown>(T)) 10244 return T; 10245 10246 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 10247 SmallVector<const SCEV *, 2> Factors; 10248 for (const SCEV *Op : M->operands()) 10249 if (!isa<SCEVConstant>(Op)) 10250 Factors.push_back(Op); 10251 10252 return SE.getMulExpr(Factors); 10253 } 10254 10255 return T; 10256 } 10257 10258 /// Return the size of an element read or written by Inst. 
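/// For example (hypothetical IR), for "store double %v, double* %p" this
/// returns the SCEV for sizeof(double), i.e. the constant 8 in the
/// pointer-width integer type; for instructions other than loads and stores
/// it returns nullptr.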
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If a term is not divisible
  // by the element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization,
/// which is the offset start of the array. The SCEV->delinearize algorithm
/// computes the multiples of SCEV coefficients: that is a pattern matching of
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is
/// %A because it appears as an offset that does not divide any of the strides
/// in the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions
/// of the array as these are the multiples by which the strides are
/// happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of that dimension: when the
/// array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
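  // (For the A[i][j][k] example in the comment above, this collects the
  // stride-derived terms (8 * %m * %o) and (8 * %o).)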
10446 SmallVector<const SCEV *, 4> Terms; 10447 collectParametricTerms(Expr, Terms); 10448 10449 if (Terms.empty()) 10450 return; 10451 10452 // Second step: find subscript sizes. 10453 findArrayDimensions(Terms, Sizes, ElementSize); 10454 10455 if (Sizes.empty()) 10456 return; 10457 10458 // Third step: compute the access functions for each subscript. 10459 computeAccessFunctions(Expr, Subscripts, Sizes); 10460 10461 if (Subscripts.empty()) 10462 return; 10463 10464 DEBUG({ 10465 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 10466 dbgs() << "ArrayDecl[UnknownSize]"; 10467 for (const SCEV *S : Sizes) 10468 dbgs() << "[" << *S << "]"; 10469 10470 dbgs() << "\nArrayRef"; 10471 for (const SCEV *S : Subscripts) 10472 dbgs() << "[" << *S << "]"; 10473 dbgs() << "\n"; 10474 }); 10475 } 10476 10477 //===----------------------------------------------------------------------===// 10478 // SCEVCallbackVH Class Implementation 10479 //===----------------------------------------------------------------------===// 10480 10481 void ScalarEvolution::SCEVCallbackVH::deleted() { 10482 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10483 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 10484 SE->ConstantEvolutionLoopExitValue.erase(PN); 10485 SE->eraseValueFromMap(getValPtr()); 10486 // this now dangles! 10487 } 10488 10489 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 10490 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 10491 10492 // Forget all the expressions associated with users of the old value, 10493 // so that future queries will recompute the expressions using the new 10494 // value. 10495 Value *Old = getValPtr(); 10496 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 10497 SmallPtrSet<User *, 8> Visited; 10498 while (!Worklist.empty()) { 10499 User *U = Worklist.pop_back_val(); 10500 // Deleting the Old value will cause this to dangle. Postpone 10501 // that until everything else is done. 10502 if (U == Old) 10503 continue; 10504 if (!Visited.insert(U).second) 10505 continue; 10506 if (PHINode *PN = dyn_cast<PHINode>(U)) 10507 SE->ConstantEvolutionLoopExitValue.erase(PN); 10508 SE->eraseValueFromMap(U); 10509 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 10510 } 10511 // Delete the Old value. 10512 if (PHINode *PN = dyn_cast<PHINode>(Old)) 10513 SE->ConstantEvolutionLoopExitValue.erase(PN); 10514 SE->eraseValueFromMap(Old); 10515 // this now dangles! 10516 } 10517 10518 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 10519 : CallbackVH(V), SE(se) {} 10520 10521 //===----------------------------------------------------------------------===// 10522 // ScalarEvolution Class Implementation 10523 //===----------------------------------------------------------------------===// 10524 10525 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 10526 AssumptionCache &AC, DominatorTree &DT, 10527 LoopInfo &LI) 10528 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 10529 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 10530 LoopDispositions(64), BlockDispositions(64) { 10531 // To use guards for proving predicates, we need to scan every instruction in 10532 // relevant basic blocks, and not just terminators. Doing this is a waste of 10533 // time if the IR does not actually contain any calls to 10534 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 
10535 // 10536 // This pessimizes the case where a pass that preserves ScalarEvolution wants 10537 // to _add_ guards to the module when there weren't any before, and wants 10538 // ScalarEvolution to optimize based on those guards. For now we prefer to be 10539 // efficient in lieu of being smart in that rather obscure case. 10540 10541 auto *GuardDecl = F.getParent()->getFunction( 10542 Intrinsic::getName(Intrinsic::experimental_guard)); 10543 HasGuards = GuardDecl && !GuardDecl->use_empty(); 10544 } 10545 10546 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 10547 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 10548 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 10549 ValueExprMap(std::move(Arg.ValueExprMap)), 10550 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 10551 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 10552 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 10553 PredicatedBackedgeTakenCounts( 10554 std::move(Arg.PredicatedBackedgeTakenCounts)), 10555 ExitLimits(std::move(Arg.ExitLimits)), 10556 ConstantEvolutionLoopExitValue( 10557 std::move(Arg.ConstantEvolutionLoopExitValue)), 10558 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 10559 LoopDispositions(std::move(Arg.LoopDispositions)), 10560 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 10561 BlockDispositions(std::move(Arg.BlockDispositions)), 10562 UnsignedRanges(std::move(Arg.UnsignedRanges)), 10563 SignedRanges(std::move(Arg.SignedRanges)), 10564 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 10565 UniquePreds(std::move(Arg.UniquePreds)), 10566 SCEVAllocator(std::move(Arg.SCEVAllocator)), 10567 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 10568 FirstUnknown(Arg.FirstUnknown) { 10569 Arg.FirstUnknown = nullptr; 10570 } 10571 10572 ScalarEvolution::~ScalarEvolution() { 10573 // Iterate through all the SCEVUnknown instances and call their 10574 // destructors, so that they release their references to their values. 10575 for (SCEVUnknown *U = FirstUnknown; U;) { 10576 SCEVUnknown *Tmp = U; 10577 U = U->Next; 10578 Tmp->~SCEVUnknown(); 10579 } 10580 FirstUnknown = nullptr; 10581 10582 ExprValueMap.clear(); 10583 ValueExprMap.clear(); 10584 HasRecMap.clear(); 10585 10586 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 10587 // that a loop had multiple computable exits. 
//===----------------------------------------------------------------------===//
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient instead of smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ExitLimits(std::move(Arg.ExitLimits)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}
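
// For reference, on a simple counted loop the printer above produces output
// of roughly this shape (illustrative transcript, not generated):
//
//   Loop %for.body: backedge-taken count is 99
//   Loop %for.body: max backedge-taken count is 99
//   Loop %for.body: Predicated backedge-taken count is 99
//    Predicates:
//   Loop %for.body: Trip multiple is 100
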
static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}

void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << " --> ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << " --> ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}

ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  // Record a conservative placeholder first: computeLoopDisposition may
  // recurse back into getLoopDisposition and grow LoopDispositions, which
  // would invalidate the reference acquired above. Hence the re-lookup below.
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
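
// A concrete reading of the rules above for a hypothetical two-deep nest
// `for i { for j { ... } }`:
//
//   {0,+,1}<j-loop> w.r.t. the j-loop: LoopComputable (it is j's addrec)
//   {0,+,1}<j-loop> w.r.t. the i-loop: LoopVariant    (i contains j)
//   {0,+,1}<i-loop> w.r.t. the j-loop: LoopInvariant  (the value is fixed
//                                                      while j iterates)
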
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  // As in getLoopDisposition, insert a conservative placeholder before
  // recursing: computeBlockDisposition may grow BlockDispositions and
  // invalidate the reference acquired above.
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
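
// A concrete reading of the scUnknown case above, on hypothetical IR: if
// %v = load i32, i32* %p is defined in block %bb, then the SCEVUnknown for
// %v dominates %bb itself (DominatesBlock), properly dominates every block
// strictly dominated by %bb (ProperlyDominatesBlock), and yields
// DoesNotDominateBlock for any block %bb does not dominate.
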
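
// Note for readers: clients do not call forgetMemoizedResults directly.
// Public invalidation entry points such as forgetValue and forgetLoop
// funnel into it for each SCEV whose cached facts must be dropped, e.g.
// (sketch; `SE` and `IV` are hypothetical names):
//
//   SE.forgetValue(IV); // drops cached facts for IV and its SCEV users
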
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S, bool EraseExitLimit) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);

  // TODO: There is a suspicion that we only need to do it when there is a
  // SCEVUnknown somewhere inside S. Need to check this.
  //
  // Use the same post-increment erase idiom as above: advancing a plain loop
  // iterator after erasing through it would use an invalidated iterator.
  if (EraseExitLimit)
    for (auto I = ExitLimits.begin(), E = ExitLimits.end(); I != E;)
      if (I->second.hasOperand(S))
        ExitLimits.erase(I++);
      else
        ++I;
}
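
// This consistency check is reached via ScalarEvolutionWrapperPass::
// verifyAnalysis below, gated on the VerifySCEV flag (the -verify-scev
// command line option), so the expensive recomputation only happens when
// verification is explicitly requested.
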
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
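
// A minimal sketch of how a new-pass-manager client obtains this analysis
// (`MyPass` and `L` are hypothetical; registration boilerplate elided):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
//     const SCEV *BTC = SE.getBackedgeTakenCount(L); // L is some Loop *
//     ...
//   }
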
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}
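
// Because predicates are uniqued through the UniquePreds folding set above,
// structurally identical predicates share one node, so pointer equality
// suffices to compare them. Sketch (A and B are SCEVs of the same type):
//
//   const SCEVPredicate *P1 = SE.getEqualPredicate(A, B);
//   const SCEVPredicate *P2 = SE.getEqualPredicate(A, B);
//   assert(P1 == P2 && "same inputs yield the same uniqued node");
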
namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
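
// A short sketch of the intended use of the two entry points above
// (`Phi` and `L` are hypothetical):
//
//   SmallPtrSet<const SCEVPredicate *, 4> Preds;
//   if (const SCEVAddRecExpr *AR =
//           SE.convertSCEVToAddRecWithPredicates(SE.getSCEV(Phi), L, Preds)) {
//     // AR is valid only under the runtime checks collected in Preds;
//     // clients emit those checks before relying on AR as an induction.
//   }
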
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached, so create a dummy set ID for them.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an associated "
                "expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
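
// Example of the union semantics above (sketch; A and B are SCEVs):
//
//   SCEVUnionPredicate U;
//   U.add(SE.getEqualPredicate(A, B));
//   U.add(SE.getEqualPredicate(A, B)); // no-op: already implied
//
// U.implies(P) holds iff some member predicate on P's expression implies P,
// which keeps the union free of redundant entries.
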
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
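
// Typical client pattern for the predicated trip count (sketch; `SE` and
// `L` are the analysis and loop being queried):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEV *BTC = PSE.getBackedgeTakenCount();
//   // Any predicates required for BTC to be valid have been accumulated
//   // and can be inspected via PSE.getUnionPredicate().
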
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
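
// End-to-end sketch of the predicated interface implemented above (`Ptr` is
// a hypothetical value; this mirrors how loop passes typically consume it):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR describes Ptr as an induction, possibly only under the runtime
//     // predicates now recorded in PSE.getUnionPredicate(); materializing
//     // those checks (e.g. with SCEVExpander) makes a transform sound.
//   }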