//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
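
// Because SCEVs are uniqued as described above, clients may compare
// expressions by pointer. A minimal usage sketch (assuming a ScalarEvolution
// instance SE and two llvm::Value pointers A and B already in scope):
//
//   const SCEV *SA = SE.getSCEV(A);
//   const SCEV *SB = SE.getSCEV(B);
//   if (SA == SB) {
//     // A and B are known to evaluate to the same symbolic expression.
//   }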

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden,
                cl::desc("Maximum depth of recursive SExt/ZExt"),
                cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));
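
// As a usage sketch (not exercised by this file itself), these knobs can be
// tuned from the opt command line, e.g.:
//
//   opt -analyze -scalar-evolution -scalar-evolution-max-arith-depth=16 in.ll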

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
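
// As an illustration of the syntax printed above: an affine recurrence that
// starts at 0 and steps by 4 in a loop whose header block is %loop, with a
// proven no-unsigned-wrap flag, prints as
//
//   {0,+,4}<nuw><%loop>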

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
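
// The recognizers below match the classic "sizeof/alignof/offsetof computed
// via getelementptr on a null pointer" constant-expression idioms. As a
// sketch of the sizeof pattern in (typed-pointer) textual IR:
//
//   ptrtoint (%T* getelementptr (%T, %T* null, i32 1) to i64)
//
// i.e. the address one element past a null %T* equals %T's allocation size.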

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used
    // by the same SCEV, so we can safely sort them by loop header dominance.
    // We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LA->getNoWrapFlags() != RA->getNoWrapFlags())
      return (int)LA->getNoWrapFlags() - (int)RA->getNoWrapFlags();

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LC->getNoWrapFlags() != RC->getNoWrapFlags())
      return (int)LC->getNoWrapFlags() - (int)RC->getNoWrapFlags();

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
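
// For example (a sketch): since constants order before unknowns, an operand
// list such as {%x, 2, %x, 1} comes out of GroupByComplexity as
// {1, 2, %x, %x} -- constants first (smaller values first at equal bit
// width), with the duplicate %x values adjacent so callers can fold them.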

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }
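
  // An illustrative sketch of the divider's behavior: dividing the SCEV
  // (8 + 4 * %n) by the constant 4 yields Quotient = (2 + %n) and
  // Remainder = 0, while dividing (6 + 4 * %n) by 4 yields
  // Quotient = (1 + %n) and Remainder = 2, since visitAddExpr divides each
  // addend separately and sums the partial quotients and remainders.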

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
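
  // A quick worked instance of the scheme above (a sketch, with W = 32 and
  // K = 3): K! = 6 = 2^1 * 3, so T = 1 and the odd part K!/2^T is 3. We form
  // It * (It - 1) * (It - 2) at W + T = 33 bits, shift right by T = 1,
  // truncate to 32 bits, and multiply by the multiplicative inverse of 3
  // modulo 2^32 (0xAAAAAAAB), which performs the exact division by 3.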

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
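///
/// For example (a sketch), the recurrence {0,+,1,+,1} -- start 0, stepped by
/// a value that itself grows by 1 each iteration -- evaluates at iteration
/// It to 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2, i.e. the
/// triangular numbers 0, 1, 3, 6, ...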
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that the recursive calls above inserted it. So if we find it,
    // just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
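// For example (a sketch at 8 bits): if Step is known positive with
// getSignedRangeMax(Step) == 4, the limit below is INT8_MIN - 4, which wraps
// to 124, and *Pred is set to SLT; whenever the recurrence's value is
// s< 124, adding at most 4 yields at most 127 and cannot overflow.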
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
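// For example (a sketch): for AR = {(4 + %x),+,4} with provable pre-start
// %x, the extended start below becomes (4 + sext/zext(%x)) rather than
// sext/zext(4 + %x), pushing the extension onto the narrower pre-start.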
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
1552 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1553 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1554 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1555 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1556 DeltaS, &Pred, this); 1557 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1558 return true; 1559 } 1560 } 1561 1562 return false; 1563 } 1564 1565 const SCEV * 1566 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1567 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1568 "This is not an extending conversion!"); 1569 assert(isSCEVable(Ty) && 1570 "This is not a conversion to a SCEVable type!"); 1571 Ty = getEffectiveSCEVType(Ty); 1572 1573 // Fold if the operand is constant. 1574 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1575 return getConstant( 1576 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1577 1578 // zext(zext(x)) --> zext(x) 1579 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1580 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1581 1582 // Before doing any expensive analysis, check to see if we've already 1583 // computed a SCEV for this Op and Ty. 1584 FoldingSetNodeID ID; 1585 ID.AddInteger(scZeroExtend); 1586 ID.AddPointer(Op); 1587 ID.AddPointer(Ty); 1588 void *IP = nullptr; 1589 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1590 if (Depth > MaxExtDepth) { 1591 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1592 Op, Ty); 1593 UniqueSCEVs.InsertNode(S, IP); 1594 addToLoopUseLists(S); 1595 return S; 1596 } 1597 1598 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1599 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1600 // It's possible the bits taken off by the truncate were all zero bits. If 1601 // so, we should be able to simplify this further. 1602 const SCEV *X = ST->getOperand(); 1603 ConstantRange CR = getUnsignedRange(X); 1604 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1605 unsigned NewBits = getTypeSizeInBits(Ty); 1606 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1607 CR.zextOrTrunc(NewBits))) 1608 return getTruncateOrZeroExtend(X, Ty); 1609 } 1610 1611 // If the input value is a chrec scev, and we can prove that the value 1612 // did not overflow the old, smaller, value, we can zero extend all of the 1613 // operands (often constants). This allows analysis of something like 1614 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1615 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1616 if (AR->isAffine()) { 1617 const SCEV *Start = AR->getStart(); 1618 const SCEV *Step = AR->getStepRecurrence(*this); 1619 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1620 const Loop *L = AR->getLoop(); 1621 1622 if (!AR->hasNoUnsignedWrap()) { 1623 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1624 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1625 } 1626 1627 // If we have special knowledge that this addrec won't overflow, 1628 // we don't need to do any further analysis. 1629 if (AR->hasNoUnsignedWrap()) 1630 return getAddRecExpr( 1631 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1632 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1633 1634 // Check whether the backedge-taken count is SCEVCouldNotCompute. 
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.
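      //
      // For instance (illustrative source), in
      //
      //   for (unsigned i = 0; i < n; ++i) { ... }
      //
      // the backedge guard "i ult n" keeps the pre-increment value of i
      // strictly below UINT_MAX, so {0,+,1} can be marked <nuw> by the
      // pre-increment check below.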
1710 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1711 !AC.assumptions().empty()) { 1712 // If the backedge is guarded by a comparison with the pre-inc 1713 // value the addrec is safe. Also, if the entry is guarded by 1714 // a comparison with the start value and the backedge is 1715 // guarded by a comparison with the post-inc value, the addrec 1716 // is safe. 1717 if (isKnownPositive(Step)) { 1718 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1719 getUnsignedRangeMax(Step)); 1720 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1721 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1722 // Cache knowledge of AR NUW, which is propagated to this 1723 // AddRec. 1724 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1725 // Return the expression with the addrec on the outside. 1726 return getAddRecExpr( 1727 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1728 Depth + 1), 1729 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1730 AR->getNoWrapFlags()); 1731 } 1732 } else if (isKnownNegative(Step)) { 1733 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1734 getSignedRangeMin(Step)); 1735 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1736 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1737 // Cache knowledge of AR NW, which is propagated to this 1738 // AddRec. Negative step causes unsigned wrap, but it 1739 // still can't self-wrap. 1740 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1741 // Return the expression with the addrec on the outside. 1742 return getAddRecExpr( 1743 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1744 Depth + 1), 1745 getSignExtendExpr(Step, Ty, Depth + 1), L, 1746 AR->getNoWrapFlags()); 1747 } 1748 } 1749 } 1750 1751 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1752 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1753 return getAddRecExpr( 1754 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1755 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1756 } 1757 } 1758 1759 // zext(A % B) --> zext(A) % zext(B) 1760 { 1761 const SCEV *LHS; 1762 const SCEV *RHS; 1763 if (matchURem(Op, LHS, RHS)) 1764 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1765 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1766 } 1767 1768 // zext(A / B) --> zext(A) / zext(B). 1769 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1770 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1771 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1772 1773 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1774 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1775 if (SA->hasNoUnsignedWrap()) { 1776 // If the addition does not unsign overflow then we can, by definition, 1777 // commute the zero extension with the addition operation. 1778 SmallVector<const SCEV *, 4> Ops; 1779 for (const auto *Op : SA->operands()) 1780 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1781 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1782 } 1783 } 1784 1785 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1786 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1787 if (SM->hasNoUnsignedWrap()) { 1788 // If the multiply does not unsign overflow then we can, by definition, 1789 // commute the zero extension with the multiply operation. 
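      // (Concretely: for i8 values A == 3 and B == 5 with (A * B)<nuw>,
      // zext(15) to i16 equals zext(3) * zext(5) computed in i16; no high
      // bits are lost on either side.)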
1790 SmallVector<const SCEV *, 4> Ops; 1791 for (const auto *Op : SM->operands()) 1792 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1793 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1794 } 1795 1796 // zext(2^K * (trunc X to iN)) to iM -> 1797 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1798 // 1799 // Proof: 1800 // 1801 // zext(2^K * (trunc X to iN)) to iM 1802 // = zext((trunc X to iN) << K) to iM 1803 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1804 // (because shl removes the top K bits) 1805 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1806 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1807 // 1808 if (SM->getNumOperands() == 2) 1809 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1810 if (MulLHS->getAPInt().isPowerOf2()) 1811 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1812 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1813 MulLHS->getAPInt().logBase2(); 1814 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1815 return getMulExpr( 1816 getZeroExtendExpr(MulLHS, Ty), 1817 getZeroExtendExpr( 1818 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1819 SCEV::FlagNUW, Depth + 1); 1820 } 1821 } 1822 1823 // The cast wasn't folded; create an explicit cast node. 1824 // Recompute the insert position, as it may have been invalidated. 1825 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1826 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1827 Op, Ty); 1828 UniqueSCEVs.InsertNode(S, IP); 1829 addToLoopUseLists(S); 1830 return S; 1831 } 1832 1833 const SCEV * 1834 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1835 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1836 "This is not an extending conversion!"); 1837 assert(isSCEVable(Ty) && 1838 "This is not a conversion to a SCEVable type!"); 1839 Ty = getEffectiveSCEVType(Ty); 1840 1841 // Fold if the operand is constant. 1842 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1843 return getConstant( 1844 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1845 1846 // sext(sext(x)) --> sext(x) 1847 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1848 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1849 1850 // sext(zext(x)) --> zext(x) 1851 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1852 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1853 1854 // Before doing any expensive analysis, check to see if we've already 1855 // computed a SCEV for this Op and Ty. 1856 FoldingSetNodeID ID; 1857 ID.AddInteger(scSignExtend); 1858 ID.AddPointer(Op); 1859 ID.AddPointer(Ty); 1860 void *IP = nullptr; 1861 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1862 // Limit recursion depth. 1863 if (Depth > MaxExtDepth) { 1864 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1865 Op, Ty); 1866 UniqueSCEVs.InsertNode(S, IP); 1867 addToLoopUseLists(S); 1868 return S; 1869 } 1870 1871 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1872 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1873 // It's possible the bits taken off by the truncate were all sign bits. If 1874 // so, we should be able to simplify this further. 
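    // For example, if X is an i32 known to lie in [-100, 100], truncating
    // to i8 and sign extending back is the identity on that range, so
    // sext(trunc(X to i8)) can be rewritten as a plain cast of X below.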
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty);
  }

  // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2, both positive,
  // and C2 is a power of two.
  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    if (SA->getNumOperands() == 2) {
      auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
      auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
      if (SMul && SC1) {
        if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
          const APInt &C1 = SC1->getAPInt();
          const APInt &C2 = SC2->getAPInt();
          if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
              C2.ugt(C1) && C2.isPowerOf2())
            return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
                              getSignExtendExpr(SMul, Ty, Depth + 1),
                              SCEV::FlagAnyWrap, Depth + 1);
        }
      }
    }

    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
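        // (For instance, an i64 count of 300 truncated to an i8 addrec type
        // becomes 44; zero extending 44 back to i64 does not reproduce 300,
        // so the equality check below fails and nothing further is claimed.)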
1950 const SCEV *CastedMaxBECount = 1951 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1952 const SCEV *RecastedMaxBECount = 1953 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1954 if (MaxBECount == RecastedMaxBECount) { 1955 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1956 // Check whether Start+Step*MaxBECount has no signed overflow. 1957 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 1958 SCEV::FlagAnyWrap, Depth + 1); 1959 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 1960 SCEV::FlagAnyWrap, 1961 Depth + 1), 1962 WideTy, Depth + 1); 1963 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 1964 const SCEV *WideMaxBECount = 1965 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1966 const SCEV *OperandExtendedAdd = 1967 getAddExpr(WideStart, 1968 getMulExpr(WideMaxBECount, 1969 getSignExtendExpr(Step, WideTy, Depth + 1), 1970 SCEV::FlagAnyWrap, Depth + 1), 1971 SCEV::FlagAnyWrap, Depth + 1); 1972 if (SAdd == OperandExtendedAdd) { 1973 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1974 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1975 // Return the expression with the addrec on the outside. 1976 return getAddRecExpr( 1977 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1978 Depth + 1), 1979 getSignExtendExpr(Step, Ty, Depth + 1), L, 1980 AR->getNoWrapFlags()); 1981 } 1982 // Similar to above, only this time treat the step value as unsigned. 1983 // This covers loops that count up with an unsigned step. 1984 OperandExtendedAdd = 1985 getAddExpr(WideStart, 1986 getMulExpr(WideMaxBECount, 1987 getZeroExtendExpr(Step, WideTy, Depth + 1), 1988 SCEV::FlagAnyWrap, Depth + 1), 1989 SCEV::FlagAnyWrap, Depth + 1); 1990 if (SAdd == OperandExtendedAdd) { 1991 // If AR wraps around then 1992 // 1993 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1994 // => SAdd != OperandExtendedAdd 1995 // 1996 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1997 // (SAdd == OperandExtendedAdd => AR is NW) 1998 1999 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 2000 2001 // Return the expression with the addrec on the outside. 2002 return getAddRecExpr( 2003 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2004 Depth + 1), 2005 getZeroExtendExpr(Step, Ty, Depth + 1), L, 2006 AR->getNoWrapFlags()); 2007 } 2008 } 2009 } 2010 2011 // Normally, in the cases we can prove no-overflow via a 2012 // backedge guarding condition, we can also compute a backedge 2013 // taken count for the loop. The exceptions are assumptions and 2014 // guards present in the loop -- SCEV is not great at exploiting 2015 // these to compute max backedge taken counts, but can still use 2016 // these to prove lack of overflow. Use this fact to avoid 2017 // doing extra work that may not pay off. 2018 2019 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 2020 !AC.assumptions().empty()) { 2021 // If the backedge is guarded by a comparison with the pre-inc 2022 // value the addrec is safe. Also, if the entry is guarded by 2023 // a comparison with the start value and the backedge is 2024 // guarded by a comparison with the post-inc value, the addrec 2025 // is safe. 
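        // For a loop such as "for (int i = s; i < n; ++i)" (names purely
        // illustrative), the backedge guard "i slt n" bounds the
        // pre-increment value away from INT_MAX, which is what the
        // OverflowLimit comparison below verifies for a positive step.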
2026 ICmpInst::Predicate Pred; 2027 const SCEV *OverflowLimit = 2028 getSignedOverflowLimitForStep(Step, &Pred, this); 2029 if (OverflowLimit && 2030 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 2031 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 2032 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 2033 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2034 return getAddRecExpr( 2035 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2036 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2037 } 2038 } 2039 2040 // If Start and Step are constants, check if we can apply this 2041 // transformation: 2042 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2 2043 auto *SC1 = dyn_cast<SCEVConstant>(Start); 2044 auto *SC2 = dyn_cast<SCEVConstant>(Step); 2045 if (SC1 && SC2) { 2046 const APInt &C1 = SC1->getAPInt(); 2047 const APInt &C2 = SC2->getAPInt(); 2048 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) && 2049 C2.isPowerOf2()) { 2050 Start = getSignExtendExpr(Start, Ty, Depth + 1); 2051 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L, 2052 AR->getNoWrapFlags()); 2053 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1), 2054 SCEV::FlagAnyWrap, Depth + 1); 2055 } 2056 } 2057 2058 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2059 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2060 return getAddRecExpr( 2061 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2062 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2063 } 2064 } 2065 2066 // If the input value is provably positive and we could not simplify 2067 // away the sext build a zext instead. 2068 if (isKnownNonNegative(Op)) 2069 return getZeroExtendExpr(Op, Ty, Depth + 1); 2070 2071 // The cast wasn't folded; create an explicit cast node. 2072 // Recompute the insert position, as it may have been invalidated. 2073 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2074 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2075 Op, Ty); 2076 UniqueSCEVs.InsertNode(S, IP); 2077 addToLoopUseLists(S); 2078 return S; 2079 } 2080 2081 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2082 /// unspecified bits out to the given type. 2083 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2084 Type *Ty) { 2085 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2086 "This is not an extending conversion!"); 2087 assert(isSCEVable(Ty) && 2088 "This is not a conversion to a SCEVable type!"); 2089 Ty = getEffectiveSCEVType(Ty); 2090 2091 // Sign-extend negative constants. 2092 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2093 if (SC->getAPInt().isNegative()) 2094 return getSignExtendExpr(Op, Ty); 2095 2096 // Peel off a truncate cast. 2097 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2098 const SCEV *NewOp = T->getOperand(); 2099 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2100 return getAnyExtendExpr(NewOp, Ty); 2101 return getTruncateOrNoop(NewOp, Ty); 2102 } 2103 2104 // Next try a zext cast. If the cast is folded, use it. 2105 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2106 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2107 return ZExt; 2108 2109 // Next try a sext cast. If the cast is folded, use it. 
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(),
                                         Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
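        // (e.g., visiting 5*(x*y) under an incoming Scale of A records the
        // key x*y with scale 5*A; another operand that reduces to the same
        // x*y key then merges into that entry instead of adding a new one,
        // which is what exposes the fold.)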
2189 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2190 const SCEV *Key = SE.getMulExpr(MulOps); 2191 auto Pair = M.insert({Key, NewScale}); 2192 if (Pair.second) { 2193 NewOps.push_back(Pair.first->first); 2194 } else { 2195 Pair.first->second += NewScale; 2196 // The map already had an entry for this value, which may indicate 2197 // a folding opportunity. 2198 Interesting = true; 2199 } 2200 } 2201 } else { 2202 // An ordinary operand. Update the map. 2203 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2204 M.insert({Ops[i], Scale}); 2205 if (Pair.second) { 2206 NewOps.push_back(Pair.first->first); 2207 } else { 2208 Pair.first->second += Scale; 2209 // The map already had an entry for this value, which may indicate 2210 // a folding opportunity. 2211 Interesting = true; 2212 } 2213 } 2214 } 2215 2216 return Interesting; 2217 } 2218 2219 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2220 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2221 // can't-overflow flags for the operation if possible. 2222 static SCEV::NoWrapFlags 2223 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2224 const SmallVectorImpl<const SCEV *> &Ops, 2225 SCEV::NoWrapFlags Flags) { 2226 using namespace std::placeholders; 2227 2228 using OBO = OverflowingBinaryOperator; 2229 2230 bool CanAnalyze = 2231 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2232 (void)CanAnalyze; 2233 assert(CanAnalyze && "don't call from other places!"); 2234 2235 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2236 SCEV::NoWrapFlags SignOrUnsignWrap = 2237 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2238 2239 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2240 auto IsKnownNonNegative = [&](const SCEV *S) { 2241 return SE->isKnownNonNegative(S); 2242 }; 2243 2244 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2245 Flags = 2246 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2247 2248 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2249 2250 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr && 2251 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) { 2252 2253 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow 2254 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow 2255 2256 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2257 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2258 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2259 Instruction::Add, C, OBO::NoSignedWrap); 2260 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2261 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2262 } 2263 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2264 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2265 Instruction::Add, C, OBO::NoUnsignedWrap); 2266 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2267 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2268 } 2269 } 2270 2271 return Flags; 2272 } 2273 2274 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2275 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2276 } 2277 2278 /// Get a canonical add expression, or something simpler if possible. 
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit recursion call depth.
  if (Depth > MaxArithDepth)
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
2357 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2358 return T->getOperand()->getType(); 2359 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2360 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2361 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2362 return T->getOperand()->getType(); 2363 } 2364 return nullptr; 2365 }; 2366 if (auto *SrcType = FindTruncSrcType()) { 2367 SmallVector<const SCEV *, 8> LargeOps; 2368 bool Ok = true; 2369 // Check all the operands to see if they can be represented in the 2370 // source type of the truncate. 2371 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2372 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2373 if (T->getOperand()->getType() != SrcType) { 2374 Ok = false; 2375 break; 2376 } 2377 LargeOps.push_back(T->getOperand()); 2378 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2379 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2380 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2381 SmallVector<const SCEV *, 8> LargeMulOps; 2382 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2383 if (const SCEVTruncateExpr *T = 2384 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2385 if (T->getOperand()->getType() != SrcType) { 2386 Ok = false; 2387 break; 2388 } 2389 LargeMulOps.push_back(T->getOperand()); 2390 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2391 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2392 } else { 2393 Ok = false; 2394 break; 2395 } 2396 } 2397 if (Ok) 2398 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2399 } else { 2400 Ok = false; 2401 break; 2402 } 2403 } 2404 if (Ok) { 2405 // Evaluate the expression in the larger type. 2406 const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); 2407 // If it folds to something simple, use it. Otherwise, don't. 2408 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2409 return getTruncateExpr(Fold, Ty); 2410 } 2411 } 2412 2413 // Skip past any other cast SCEVs. 2414 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2415 ++Idx; 2416 2417 // If there are add operands they would be next. 2418 if (Idx < Ops.size()) { 2419 bool DeletedAdd = false; 2420 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2421 if (Ops.size() > AddOpsInlineThreshold || 2422 Add->getNumOperands() > AddOpsInlineThreshold) 2423 break; 2424 // If we have an add, expand the add operands onto the end of the operands 2425 // list. 2426 Ops.erase(Ops.begin()+Idx); 2427 Ops.append(Add->op_begin(), Add->op_end()); 2428 DeletedAdd = true; 2429 } 2430 2431 // If we deleted at least one add, we added operands to the end of the list, 2432 // and they are not necessarily sorted. Recurse to resort and resimplify 2433 // any operands we just acquired. 2434 if (DeletedAdd) 2435 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2436 } 2437 2438 // Skip over the add expression until we get to a multiply. 2439 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2440 ++Idx; 2441 2442 // Check to see if there are any folding opportunities present with 2443 // operands multiplied by constant values. 
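  // For example, x + 2*y + 3*x collects to the scale map {x: 1+3, y: 2} and
  // is re-emitted as 4*x + 2*y, a fold that the flat operand list hid.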
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
2526 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2527 OMulOp != e; ++OMulOp) 2528 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2529 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2530 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2531 if (Mul->getNumOperands() != 2) { 2532 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2533 Mul->op_begin()+MulOp); 2534 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2535 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2536 } 2537 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2538 if (OtherMul->getNumOperands() != 2) { 2539 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2540 OtherMul->op_begin()+OMulOp); 2541 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2542 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2543 } 2544 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2545 const SCEV *InnerMulSum = 2546 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2547 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2548 SCEV::FlagAnyWrap, Depth + 1); 2549 if (Ops.size() == 2) return OuterMul; 2550 Ops.erase(Ops.begin()+Idx); 2551 Ops.erase(Ops.begin()+OtherMulIdx-1); 2552 Ops.push_back(OuterMul); 2553 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2554 } 2555 } 2556 } 2557 } 2558 2559 // If there are any add recurrences in the operands list, see if any other 2560 // added values are loop invariant. If so, we can fold them into the 2561 // recurrence. 2562 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2563 ++Idx; 2564 2565 // Scan over all recurrences, trying to fold loop invariants into them. 2566 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2567 // Scan all of the other operands to this add and add them to the vector if 2568 // they are loop invariant w.r.t. the recurrence. 2569 SmallVector<const SCEV *, 8> LIOps; 2570 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2571 const Loop *AddRecLoop = AddRec->getLoop(); 2572 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2573 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2574 LIOps.push_back(Ops[i]); 2575 Ops.erase(Ops.begin()+i); 2576 --i; --e; 2577 } 2578 2579 // If we found some loop invariants, fold them into the recurrence. 2580 if (!LIOps.empty()) { 2581 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2582 LIOps.push_back(AddRec->getStart()); 2583 2584 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2585 AddRec->op_end()); 2586 // This follows from the fact that the no-wrap flags on the outer add 2587 // expression are applicable on the 0th iteration, when the add recurrence 2588 // will be equal to its start value. 2589 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2590 2591 // Build the new addrec. Propagate the NUW and NSW flags if both the 2592 // outer add and the inner addrec are guaranteed to have no overflow. 2593 // Always propagate NW. 2594 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2595 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2596 2597 // If all of the other operands were loop invariant, we are done. 2598 if (Ops.size() == 1) return NewRec; 2599 2600 // Otherwise, add the folded AddRec by the non-invariant parts. 
2601 for (unsigned i = 0;; ++i) 2602 if (Ops[i] == AddRec) { 2603 Ops[i] = NewRec; 2604 break; 2605 } 2606 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2607 } 2608 2609 // Okay, if there weren't any loop invariants to be folded, check to see if 2610 // there are multiple AddRec's with the same loop induction variable being 2611 // added together. If so, we can fold them. 2612 for (unsigned OtherIdx = Idx+1; 2613 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2614 ++OtherIdx) { 2615 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2616 // so that the 1st found AddRecExpr is dominated by all others. 2617 assert(DT.dominates( 2618 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2619 AddRec->getLoop()->getHeader()) && 2620 "AddRecExprs are not sorted in reverse dominance order?"); 2621 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2622 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2623 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2624 AddRec->op_end()); 2625 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2626 ++OtherIdx) { 2627 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2628 if (OtherAddRec->getLoop() == AddRecLoop) { 2629 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2630 i != e; ++i) { 2631 if (i >= AddRecOps.size()) { 2632 AddRecOps.append(OtherAddRec->op_begin()+i, 2633 OtherAddRec->op_end()); 2634 break; 2635 } 2636 SmallVector<const SCEV *, 2> TwoOps = { 2637 AddRecOps[i], OtherAddRec->getOperand(i)}; 2638 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2639 } 2640 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2641 } 2642 } 2643 // Step size has changed, so we cannot guarantee no self-wraparound. 2644 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2645 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2646 } 2647 } 2648 2649 // Otherwise couldn't fold anything into this recurrence. Move onto the 2650 // next one. 2651 } 2652 2653 // Okay, it looks like we really DO need an add expr. Check to see if we 2654 // already have one, otherwise create a new one. 
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

// Multiply i by j, setting Overflow if the unsigned 64-bit product wraps.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we multiply by the next factor of the numerator and
  // divide by the next factor of the denominator (e.g., Choose(6, 2) is
  // computed as (6*5)/(1*2) == 15). This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
2757 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2758 SCEV::NoWrapFlags Flags, 2759 unsigned Depth) { 2760 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2761 "only nuw or nsw allowed"); 2762 assert(!Ops.empty() && "Cannot get empty mul!"); 2763 if (Ops.size() == 1) return Ops[0]; 2764 #ifndef NDEBUG 2765 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2766 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2767 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2768 "SCEVMulExpr operand types don't match!"); 2769 #endif 2770 2771 // Sort by complexity, this groups all similar expression types together. 2772 GroupByComplexity(Ops, &LI, DT); 2773 2774 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2775 2776 // Limit recursion calls depth. 2777 if (Depth > MaxArithDepth) 2778 return getOrCreateMulExpr(Ops, Flags); 2779 2780 // If there are any constants, fold them together. 2781 unsigned Idx = 0; 2782 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2783 2784 if (Ops.size() == 2) 2785 // C1*(C2+V) -> C1*C2 + C1*V 2786 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2787 // If any of Add's ops are Adds or Muls with a constant, apply this 2788 // transformation as well. 2789 // 2790 // TODO: There are some cases where this transformation is not 2791 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2792 // this transformation should be narrowed down. 2793 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2794 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2795 SCEV::FlagAnyWrap, Depth + 1), 2796 getMulExpr(LHSC, Add->getOperand(1), 2797 SCEV::FlagAnyWrap, Depth + 1), 2798 SCEV::FlagAnyWrap, Depth + 1); 2799 2800 ++Idx; 2801 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2802 // We found two constants, fold them together! 2803 ConstantInt *Fold = 2804 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2805 Ops[0] = getConstant(Fold); 2806 Ops.erase(Ops.begin()+1); // Erase the folded element 2807 if (Ops.size() == 1) return Ops[0]; 2808 LHSC = cast<SCEVConstant>(Ops[0]); 2809 } 2810 2811 // If we are left with a constant one being multiplied, strip it off. 2812 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2813 Ops.erase(Ops.begin()); 2814 --Idx; 2815 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2816 // If we have a multiply of zero, it will always be zero. 2817 return Ops[0]; 2818 } else if (Ops[0]->isAllOnesValue()) { 2819 // If we have a mul by -1 of an add, try distributing the -1 among the 2820 // add operands. 2821 if (Ops.size() == 2) { 2822 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2823 SmallVector<const SCEV *, 4> NewOps; 2824 bool AnyFolded = false; 2825 for (const SCEV *AddOp : Add->operands()) { 2826 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2827 Depth + 1); 2828 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2829 NewOps.push_back(Mul); 2830 } 2831 if (AnyFolded) 2832 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2833 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2834 // Negation preserves a recurrence's no self-wrap property. 
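          // A sketch of why: -1 * {S,+,T} distributes to {-S,+,-T}, and
          // negation maps the sequence of iterates one-for-one onto its
          // negation, so the new recurrence self-wraps iff the old one did.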
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
2916 for (unsigned i = 0;; ++i) 2917 if (Ops[i] == AddRec) { 2918 Ops[i] = NewRec; 2919 break; 2920 } 2921 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2922 } 2923 2924 // Okay, if there weren't any loop invariants to be folded, check to see 2925 // if there are multiple AddRec's with the same loop induction variable 2926 // being multiplied together. If so, we can fold them. 2927 2928 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2929 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2930 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2931 // ]]],+,...up to x=2n}. 2932 // Note that the arguments to choose() are always integers with values 2933 // known at compile time, never SCEV objects. 2934 // 2935 // The implementation avoids pointless extra computations when the two 2936 // addrec's are of different length (mathematically, it's equivalent to 2937 // an infinite stream of zeros on the right). 2938 bool OpsModified = false; 2939 for (unsigned OtherIdx = Idx+1; 2940 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2941 ++OtherIdx) { 2942 const SCEVAddRecExpr *OtherAddRec = 2943 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2944 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2945 continue; 2946 2947 // Limit max number of arguments to avoid creation of unreasonably big 2948 // SCEVAddRecs with very complex operands. 2949 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 2950 MaxAddRecSize) 2951 continue; 2952 2953 bool Overflow = false; 2954 Type *Ty = AddRec->getType(); 2955 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 2956 SmallVector<const SCEV*, 7> AddRecOps; 2957 for (int x = 0, xe = AddRec->getNumOperands() + 2958 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 2959 const SCEV *Term = getZero(Ty); 2960 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 2961 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 2962 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 2963 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 2964 z < ze && !Overflow; ++z) { 2965 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 2966 uint64_t Coeff; 2967 if (LargerThan64Bits) 2968 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 2969 else 2970 Coeff = Coeff1*Coeff2; 2971 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 2972 const SCEV *Term1 = AddRec->getOperand(y-z); 2973 const SCEV *Term2 = OtherAddRec->getOperand(z); 2974 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2, 2975 SCEV::FlagAnyWrap, Depth + 1), 2976 SCEV::FlagAnyWrap, Depth + 1); 2977 } 2978 } 2979 AddRecOps.push_back(Term); 2980 } 2981 if (!Overflow) { 2982 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), 2983 SCEV::FlagAnyWrap); 2984 if (Ops.size() == 2) return NewAddRec; 2985 Ops[Idx] = NewAddRec; 2986 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2987 OpsModified = true; 2988 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 2989 if (!AddRec) 2990 break; 2991 } 2992 } 2993 if (OpsModified) 2994 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2995 2996 // Otherwise couldn't fold anything into this recurrence. Move onto the 2997 // next one. 2998 } 2999 3000 // Okay, it looks like we really DO need an mul expr. Check to see if we 3001 // already have one, otherwise create a new one. 3002 return getOrCreateMulExpr(Ops, Flags); 3003 } 3004 3005 /// Represents an unsigned remainder expression based on unsigned division. 
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general expansion:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of its LHS.
      // TODO: Generalize this to non-constants by using known-bits
      // information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence:
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
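          // For example (illustrative): {5,+,4}<L> /u 8 is canonicalized to
          // {4,+,4}<L> /u 8, since C%N == 8%4 == 0 and 5 - (5 urem 4) == 4.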
3079 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3080 if (StartC && !DivInt.urem(StepInt) && 3081 getZeroExtendExpr(AR, ExtTy) == 3082 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3083 getZeroExtendExpr(Step, ExtTy), 3084 AR->getLoop(), SCEV::FlagAnyWrap)) { 3085 const APInt &StartInt = StartC->getAPInt(); 3086 const APInt &StartRem = StartInt.urem(StepInt); 3087 if (StartRem != 0) 3088 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3089 AR->getLoop(), SCEV::FlagNW); 3090 } 3091 } 3092 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3093 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3094 SmallVector<const SCEV *, 4> Operands; 3095 for (const SCEV *Op : M->operands()) 3096 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3097 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3098 // Find an operand that's safely divisible. 3099 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3100 const SCEV *Op = M->getOperand(i); 3101 const SCEV *Div = getUDivExpr(Op, RHSC); 3102 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3103 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3104 M->op_end()); 3105 Operands[i] = Div; 3106 return getMulExpr(Operands); 3107 } 3108 } 3109 } 3110 3111 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3112 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3113 if (auto *DivisorConstant = 3114 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3115 bool Overflow = false; 3116 APInt NewRHS = 3117 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3118 if (Overflow) { 3119 return getConstant(RHSC->getType(), 0, false); 3120 } 3121 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3122 } 3123 } 3124 3125 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3126 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3127 SmallVector<const SCEV *, 4> Operands; 3128 for (const SCEV *Op : A->operands()) 3129 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3130 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3131 Operands.clear(); 3132 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3133 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3134 if (isa<SCEVUDivExpr>(Op) || 3135 getMulExpr(Op, RHS) != A->getOperand(i)) 3136 break; 3137 Operands.push_back(Op); 3138 } 3139 if (Operands.size() == A->getNumOperands()) 3140 return getAddExpr(Operands); 3141 } 3142 } 3143 3144 // Fold if both operands are constant. 
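      // For example (illustrative), (6 /u 3) folds directly to the
      // constant 2 via the constant folding below.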
3145 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3146 Constant *LHSCV = LHSC->getValue(); 3147 Constant *RHSCV = RHSC->getValue(); 3148 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3149 RHSCV))); 3150 } 3151 } 3152 } 3153 3154 FoldingSetNodeID ID; 3155 ID.AddInteger(scUDivExpr); 3156 ID.AddPointer(LHS); 3157 ID.AddPointer(RHS); 3158 void *IP = nullptr; 3159 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3160 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3161 LHS, RHS); 3162 UniqueSCEVs.InsertNode(S, IP); 3163 addToLoopUseLists(S); 3164 return S; 3165 } 3166 3167 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3168 APInt A = C1->getAPInt().abs(); 3169 APInt B = C2->getAPInt().abs(); 3170 uint32_t ABW = A.getBitWidth(); 3171 uint32_t BBW = B.getBitWidth(); 3172 3173 if (ABW > BBW) 3174 B = B.zext(ABW); 3175 else if (ABW < BBW) 3176 A = A.zext(BBW); 3177 3178 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3179 } 3180 3181 /// Get a canonical unsigned division expression, or something simpler if 3182 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3183 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3184 /// it's not exact because the udiv may be clearing bits. 3185 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3186 const SCEV *RHS) { 3187 // TODO: we could try to find factors in all sorts of things, but for now we 3188 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3189 // end of this file for inspiration. 3190 3191 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3192 if (!Mul || !Mul->hasNoUnsignedWrap()) 3193 return getUDivExpr(LHS, RHS); 3194 3195 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3196 // If the mulexpr multiplies by a constant, then that constant must be the 3197 // first element of the mulexpr. 3198 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3199 if (LHSCst == RHSCst) { 3200 SmallVector<const SCEV *, 2> Operands; 3201 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3202 return getMulExpr(Operands); 3203 } 3204 3205 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3206 // that there's a factor provided by one of the other terms. We need to 3207 // check. 3208 APInt Factor = gcd(LHSCst, RHSCst); 3209 if (!Factor.isIntN(1)) { 3210 LHSCst = 3211 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3212 RHSCst = 3213 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3214 SmallVector<const SCEV *, 2> Operands; 3215 Operands.push_back(LHSCst); 3216 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3217 LHS = getMulExpr(Operands); 3218 RHS = RHSCst; 3219 Mul = dyn_cast<SCEVMulExpr>(LHS); 3220 if (!Mul) 3221 return getUDivExactExpr(LHS, RHS); 3222 } 3223 } 3224 } 3225 3226 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3227 if (Mul->getOperand(i) == RHS) { 3228 SmallVector<const SCEV *, 2> Operands; 3229 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3230 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3231 return getMulExpr(Operands); 3232 } 3233 } 3234 3235 return getUDivExpr(LHS, RHS); 3236 } 3237 3238 /// Get an add recurrence expression for the specified loop. Simplify the 3239 /// expression as much as possible. 
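/// For example (illustrative), if Step is itself an add recurrence
/// {S1,+,S2}<L> on the same loop L, the operands are flattened and the
/// result is the single recurrence {Start,+,S1,+,S2}<L>.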
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
3318 SCEV::NoWrapFlags InnerFlags = 3319 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3320 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3321 } 3322 } 3323 // Reset Operands to its original state. 3324 Operands[0] = NestedAR; 3325 } 3326 } 3327 3328 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3329 // already have one, otherwise create a new one. 3330 FoldingSetNodeID ID; 3331 ID.AddInteger(scAddRecExpr); 3332 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3333 ID.AddPointer(Operands[i]); 3334 ID.AddPointer(L); 3335 void *IP = nullptr; 3336 SCEVAddRecExpr *S = 3337 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 3338 if (!S) { 3339 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 3340 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 3341 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 3342 O, Operands.size(), L); 3343 UniqueSCEVs.InsertNode(S, IP); 3344 addToLoopUseLists(S); 3345 } 3346 S->setNoWrapFlags(Flags); 3347 return S; 3348 } 3349 3350 const SCEV * 3351 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3352 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3353 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3354 // getSCEV(Base)->getType() has the same address space as Base->getType() 3355 // because SCEV::getType() preserves the address space. 3356 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3357 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3358 // instruction to its SCEV, because the Instruction may be guarded by control 3359 // flow and the no-overflow bits may not be valid for the expression in any 3360 // context. This can be fixed similarly to how these flags are handled for 3361 // adds. 3362 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3363 : SCEV::FlagAnyWrap; 3364 3365 const SCEV *TotalOffset = getZero(IntPtrTy); 3366 // The array size is unimportant. The first thing we do on CurTy is getting 3367 // its element type. 3368 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3369 for (const SCEV *IndexExpr : IndexExprs) { 3370 // Compute the (potentially symbolic) offset in bytes for this index. 3371 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3372 // For a struct, add the member offset. 3373 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3374 unsigned FieldNo = Index->getZExtValue(); 3375 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3376 3377 // Add the field offset to the running total offset. 3378 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3379 3380 // Update CurTy to the type of the field at Index. 3381 CurTy = STy->getTypeAtIndex(Index); 3382 } else { 3383 // Update CurTy to its element type. 3384 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3385 // For an array, add the element offset, explicitly scaled. 3386 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3387 // Getelementptr indices are signed. 3388 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3389 3390 // Multiply the index by the element size to compute the element offset. 3391 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3392 3393 // Add the element offset to the running total offset. 3394 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3395 } 3396 } 3397 3398 // Add the total offset from all the GEP indices to the base. 
3399 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3400 } 3401 3402 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3403 const SCEV *RHS) { 3404 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3405 return getSMaxExpr(Ops); 3406 } 3407 3408 const SCEV * 3409 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3410 assert(!Ops.empty() && "Cannot get empty smax!"); 3411 if (Ops.size() == 1) return Ops[0]; 3412 #ifndef NDEBUG 3413 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3414 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3415 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3416 "SCEVSMaxExpr operand types don't match!"); 3417 #endif 3418 3419 // Sort by complexity, this groups all similar expression types together. 3420 GroupByComplexity(Ops, &LI, DT); 3421 3422 // If there are any constants, fold them together. 3423 unsigned Idx = 0; 3424 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3425 ++Idx; 3426 assert(Idx < Ops.size()); 3427 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3428 // We found two constants, fold them together! 3429 ConstantInt *Fold = ConstantInt::get( 3430 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3431 Ops[0] = getConstant(Fold); 3432 Ops.erase(Ops.begin()+1); // Erase the folded element 3433 if (Ops.size() == 1) return Ops[0]; 3434 LHSC = cast<SCEVConstant>(Ops[0]); 3435 } 3436 3437 // If we are left with a constant minimum-int, strip it off. 3438 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3439 Ops.erase(Ops.begin()); 3440 --Idx; 3441 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3442 // If we have an smax with a constant maximum-int, it will always be 3443 // maximum-int. 3444 return Ops[0]; 3445 } 3446 3447 if (Ops.size() == 1) return Ops[0]; 3448 } 3449 3450 // Find the first SMax 3451 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3452 ++Idx; 3453 3454 // Check to see if one of the operands is an SMax. If so, expand its operands 3455 // onto our operand list, and recurse to simplify. 3456 if (Idx < Ops.size()) { 3457 bool DeletedSMax = false; 3458 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3459 Ops.erase(Ops.begin()+Idx); 3460 Ops.append(SMax->op_begin(), SMax->op_end()); 3461 DeletedSMax = true; 3462 } 3463 3464 if (DeletedSMax) 3465 return getSMaxExpr(Ops); 3466 } 3467 3468 // Okay, check to see if the same value occurs in the operand list twice. If 3469 // so, delete one. Since we sorted the list, these values are required to 3470 // be adjacent. 3471 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3472 // X smax Y smax Y --> X smax Y 3473 // X smax Y --> X, if X is always greater than Y 3474 if (Ops[i] == Ops[i+1] || 3475 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3476 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3477 --i; --e; 3478 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3479 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3480 --i; --e; 3481 } 3482 3483 if (Ops.size() == 1) return Ops[0]; 3484 3485 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3486 3487 // Okay, it looks like we really DO need an smax expr. Check to see if we 3488 // already have one, otherwise create a new one. 
3489 FoldingSetNodeID ID; 3490 ID.AddInteger(scSMaxExpr); 3491 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3492 ID.AddPointer(Ops[i]); 3493 void *IP = nullptr; 3494 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3495 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3496 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3497 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3498 O, Ops.size()); 3499 UniqueSCEVs.InsertNode(S, IP); 3500 addToLoopUseLists(S); 3501 return S; 3502 } 3503 3504 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3505 const SCEV *RHS) { 3506 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3507 return getUMaxExpr(Ops); 3508 } 3509 3510 const SCEV * 3511 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3512 assert(!Ops.empty() && "Cannot get empty umax!"); 3513 if (Ops.size() == 1) return Ops[0]; 3514 #ifndef NDEBUG 3515 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3516 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3517 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3518 "SCEVUMaxExpr operand types don't match!"); 3519 #endif 3520 3521 // Sort by complexity, this groups all similar expression types together. 3522 GroupByComplexity(Ops, &LI, DT); 3523 3524 // If there are any constants, fold them together. 3525 unsigned Idx = 0; 3526 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3527 ++Idx; 3528 assert(Idx < Ops.size()); 3529 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3530 // We found two constants, fold them together! 3531 ConstantInt *Fold = ConstantInt::get( 3532 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3533 Ops[0] = getConstant(Fold); 3534 Ops.erase(Ops.begin()+1); // Erase the folded element 3535 if (Ops.size() == 1) return Ops[0]; 3536 LHSC = cast<SCEVConstant>(Ops[0]); 3537 } 3538 3539 // If we are left with a constant minimum-int, strip it off. 3540 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3541 Ops.erase(Ops.begin()); 3542 --Idx; 3543 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3544 // If we have an umax with a constant maximum-int, it will always be 3545 // maximum-int. 3546 return Ops[0]; 3547 } 3548 3549 if (Ops.size() == 1) return Ops[0]; 3550 } 3551 3552 // Find the first UMax 3553 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3554 ++Idx; 3555 3556 // Check to see if one of the operands is a UMax. If so, expand its operands 3557 // onto our operand list, and recurse to simplify. 3558 if (Idx < Ops.size()) { 3559 bool DeletedUMax = false; 3560 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3561 Ops.erase(Ops.begin()+Idx); 3562 Ops.append(UMax->op_begin(), UMax->op_end()); 3563 DeletedUMax = true; 3564 } 3565 3566 if (DeletedUMax) 3567 return getUMaxExpr(Ops); 3568 } 3569 3570 // Okay, check to see if the same value occurs in the operand list twice. If 3571 // so, delete one. Since we sorted the list, these values are required to 3572 // be adjacent. 
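  // For example (illustrative): umax(%x, %y, %y) simplifies to umax(%x, %y),
  // and umax(%x, %y) simplifies to %x when %x u>= %y can be proven.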
3573 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3574 // X umax Y umax Y --> X umax Y 3575 // X umax Y --> X, if X is always greater than Y 3576 if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning( 3577 ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) { 3578 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3579 --i; --e; 3580 } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i], 3581 Ops[i + 1])) { 3582 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3583 --i; --e; 3584 } 3585 3586 if (Ops.size() == 1) return Ops[0]; 3587 3588 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3589 3590 // Okay, it looks like we really DO need a umax expr. Check to see if we 3591 // already have one, otherwise create a new one. 3592 FoldingSetNodeID ID; 3593 ID.AddInteger(scUMaxExpr); 3594 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3595 ID.AddPointer(Ops[i]); 3596 void *IP = nullptr; 3597 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3598 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3599 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3600 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3601 O, Ops.size()); 3602 UniqueSCEVs.InsertNode(S, IP); 3603 addToLoopUseLists(S); 3604 return S; 3605 } 3606 3607 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3608 const SCEV *RHS) { 3609 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3610 return getSMinExpr(Ops); 3611 } 3612 3613 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3614 // ~smax(~x, ~y, ~z) == smin(x, y, z). 3615 SmallVector<const SCEV *, 2> NotOps; 3616 for (auto *S : Ops) 3617 NotOps.push_back(getNotSCEV(S)); 3618 return getNotSCEV(getSMaxExpr(NotOps)); 3619 } 3620 3621 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3622 const SCEV *RHS) { 3623 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3624 return getUMinExpr(Ops); 3625 } 3626 3627 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3628 assert(!Ops.empty() && "At least one operand must be!"); 3629 // Trivial case. 3630 if (Ops.size() == 1) 3631 return Ops[0]; 3632 3633 // ~umax(~x, ~y, ~z) == umin(x, y, z). 3634 SmallVector<const SCEV *, 2> NotOps; 3635 for (auto *S : Ops) 3636 NotOps.push_back(getNotSCEV(S)); 3637 return getNotSCEV(getUMaxExpr(NotOps)); 3638 } 3639 3640 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3641 // We can bypass creating a target-independent 3642 // constant expression and then folding it back into a ConstantInt. 3643 // This is just a compile-time optimization. 3644 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3645 } 3646 3647 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3648 StructType *STy, 3649 unsigned FieldNo) { 3650 // We can bypass creating a target-independent 3651 // constant expression and then folding it back into a ConstantInt. 3652 // This is just a compile-time optimization. 3653 return getConstant( 3654 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3655 } 3656 3657 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3658 // Don't attempt to do anything other than create a SCEVUnknown object 3659 // here. createSCEV only calls getUnknown after checking for all other 3660 // interesting possibilities, and any other code that calls getUnknown 3661 // is doing so in order to hide a value from SCEV canonicalization. 
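  // (Illustrative: a function argument or a volatile load result has no
  // simpler canonical form, so it stays an opaque SCEVUnknown leaf.)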

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
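/// For example (illustrative), S = (42 + %x) splits into {%x, 42}, relying
/// on SCEVAddExpr's canonical operand order placing the constant first,
/// while a plain %x splits into {%x, nullptr}.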
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately; eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether the value has nuw/nsw/exact set but its SCEV does not.
/// TODO: In reality it is better to check for poison recursively,
/// but this is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S has been inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
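      // For example (illustrative): if %v computes (4 + %x), we also record
      // %x -> {%v, 4} so the expander can materialize %x from %v.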
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
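  // For example (illustrative): in i8, (-1 -<nsw> (-128)) == 127 does not
  // wrap, yet (-1) + (-1)*(-128) would, because (-1)*(-128) == +128 is not
  // representable in i8; this is why RHS == M must be ruled out above.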
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression if its loop is L. If its loop is not L, then: if
/// IgnoreOtherLoops is true, use the AddRec itself; otherwise the rewrite
/// cannot be done. If S contains a loop-variant SCEVUnknown, the rewrite
/// cannot be done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression if its loop is L; otherwise use the AddRec
/// itself. If S contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
        ? SE.getCouldNotCompute()
        : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match, we assume a true value
/// for the condition while building SCEV nodes.
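/// For example (illustrative), if the latch branches back to the header on
/// (%i ult %n), a select inside the loop keyed on that same compare is
/// folded to its true arm while the SCEV is built.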
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target the same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop backedge condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if the loop backedge is taken on the positive branch
  /// condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition of the loop latch, return a
  // constant evolution node reflecting which way the backedge branch is
  // taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.
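  // For example (illustrative): on i32, (xor %x, -2147483648) is mapped by
  // the Xor case below to an add of the sign mask, since flipping the sign
  // bit is the same as adding 0x80000000 modulo 2^32.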

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
    if (!CI)
      break;

    if (auto *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // Now that we know that all uses of the arithmetic-result component of
        // CI are guarded by the overflow check, we can go ahead and pretend
        // that the arithmetic is non-overflowing.
        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ true,
                          /* IsNUW = */ false);
        else
          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
                          CI->getArgOperand(1), /* IsNSW = */ false,
                          /* IsNUW = */ true);
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
                          CI->getArgOperand(1));

        // The same reasoning as sadd/uadd above.
4475 if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow) 4476 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4477 CI->getArgOperand(1), /* IsNSW = */ true, 4478 /* IsNUW = */ false); 4479 else 4480 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4481 CI->getArgOperand(1), /* IsNSW = */ false, 4482 /* IsNUW = */ true); 4483 case Intrinsic::smul_with_overflow: 4484 case Intrinsic::umul_with_overflow: 4485 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4486 CI->getArgOperand(1)); 4487 default: 4488 break; 4489 } 4490 break; 4491 } 4492 4493 default: 4494 break; 4495 } 4496 4497 return None; 4498 } 4499 4500 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4501 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4502 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4503 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4504 /// follows one of the following patterns: 4505 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4506 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4507 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4508 /// we return the type of the truncation operation, and indicate whether the 4509 /// truncated type should be treated as signed/unsigned by setting 4510 /// \p Signed to true/false, respectively. 4511 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4512 bool &Signed, ScalarEvolution &SE) { 4513 // The case where Op == SymbolicPHI (that is, with no type conversions on 4514 // the way) is handled by the regular add recurrence creating logic and 4515 // would have already been triggered in createAddRecForPHI. Reaching it here 4516 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4517 // because one of the other operands of the SCEVAddExpr updating this PHI is 4518 // not invariant). 4519 // 4520 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4521 // this case predicates that allow us to prove that Op == SymbolicPHI will 4522 // be added. 4523 if (Op == SymbolicPHI) 4524 return nullptr; 4525 4526 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4527 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4528 if (SourceBits != NewBits) 4529 return nullptr; 4530 4531 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4532 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4533 if (!SExt && !ZExt) 4534 return nullptr; 4535 const SCEVTruncateExpr *Trunc = 4536 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4537 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4538 if (!Trunc) 4539 return nullptr; 4540 const SCEV *X = Trunc->getOperand(); 4541 if (X != SymbolicPHI) 4542 return nullptr; 4543 Signed = SExt != nullptr; 4544 return Trunc->getType(); 4545 } 4546 4547 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4548 if (!PN->getType()->isIntegerTy()) 4549 return nullptr; 4550 const Loop *L = LI.getLoopFor(PN->getParent()); 4551 if (!L || L->getHeader() != PN->getParent()) 4552 return nullptr; 4553 return L; 4554 } 4555 4556 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4557 // computation that updates the phi follows the following pattern: 4558 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4559 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 
4560 // If so, try to see if it can be rewritten as an AddRecExpr under some
4561 // Predicates. If successful, return them as a pair. Also cache the results
4562 // of the analysis.
4563 //
4564 // Example usage scenario:
4565 //    Say the Rewriter is called for the following SCEV:
4566 //         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4567 //    where:
4568 //         %X = phi i64 (%Start, %BEValue)
4569 //    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4570 //    and call this function with %SymbolicPHI = %X.
4571 //
4572 //    The analysis will find that the value coming around the backedge has
4573 //    the following SCEV:
4574 //         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4575 //    Upon concluding that this matches the desired pattern, the function
4576 //    will return the pair {NewAddRec, SmallPredsVec} where:
4577 //         NewAddRec = {%Start,+,%Step}
4578 //         SmallPredsVec = {P1, P2, P3} as follows:
4579 //           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4580 //           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4581 //           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4582 //    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4583 //    under the predicates {P1,P2,P3}.
4584 //    This predicated rewrite will be cached in PredicatedSCEVRewrites:
4585 //         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4586 //
4587 // TODOs:
4588 //
4589 // 1) Extend the Induction descriptor to also support inductions that involve
4590 //    casts: When needed (namely, when we are called in the context of the
4591 //    vectorizer induction analysis), a Set of cast instructions will be
4592 //    populated by this method, and provided back to isInductionPHI. This is
4593 //    needed to allow the vectorizer to properly record them to be ignored by
4594 //    the cost model and to avoid vectorizing them (otherwise these casts,
4595 //    which are redundant under the runtime overflow checks, will be
4596 //    vectorized, which can be costly).
4597 //
4598 // 2) Support additional induction/PHISCEV patterns: We also want to support
4599 //    inductions where the sext-trunc / zext-trunc operations (partly) occur
4600 //    after the induction update operation (the induction increment):
4601 //
4602 //      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4603 //    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4604 //
4605 //      (Trunc iy ((SExt/ZExt ix (%SymbolicPHI) to iy) + InvariantAccum) to ix)
4606 //    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4607 //
4608 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4609 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4610 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4611   SmallVector<const SCEVPredicate *, 3> Predicates;
4612
4613   // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4614   // return an AddRec expression under some predicate.
4615
4616   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4617   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4618   assert(L && "Expecting an integer loop header phi");
4619
4620   // The loop may have multiple entrances or multiple exits; we can analyze
4621   // this phi as an addrec if it has a unique entry value and a unique
4622   // backedge value.
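  // For example (an illustrative phi, not taken from the source), in
  //   header:
  //     %x = phi i64 [ %start, %preheader ], [ %x.next, %latch ]
  // %start is the unique entry value and %x.next the unique backedge value.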
4623 Value *BEValueV = nullptr, *StartValueV = nullptr; 4624 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4625 Value *V = PN->getIncomingValue(i); 4626 if (L->contains(PN->getIncomingBlock(i))) { 4627 if (!BEValueV) { 4628 BEValueV = V; 4629 } else if (BEValueV != V) { 4630 BEValueV = nullptr; 4631 break; 4632 } 4633 } else if (!StartValueV) { 4634 StartValueV = V; 4635 } else if (StartValueV != V) { 4636 StartValueV = nullptr; 4637 break; 4638 } 4639 } 4640 if (!BEValueV || !StartValueV) 4641 return None; 4642 4643 const SCEV *BEValue = getSCEV(BEValueV); 4644 4645 // If the value coming around the backedge is an add with the symbolic 4646 // value we just inserted, possibly with casts that we can ignore under 4647 // an appropriate runtime guard, then we found a simple induction variable! 4648 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4649 if (!Add) 4650 return None; 4651 4652 // If there is a single occurrence of the symbolic value, possibly 4653 // casted, replace it with a recurrence. 4654 unsigned FoundIndex = Add->getNumOperands(); 4655 Type *TruncTy = nullptr; 4656 bool Signed; 4657 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4658 if ((TruncTy = 4659 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4660 if (FoundIndex == e) { 4661 FoundIndex = i; 4662 break; 4663 } 4664 4665 if (FoundIndex == Add->getNumOperands()) 4666 return None; 4667 4668 // Create an add with everything but the specified operand. 4669 SmallVector<const SCEV *, 8> Ops; 4670 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4671 if (i != FoundIndex) 4672 Ops.push_back(Add->getOperand(i)); 4673 const SCEV *Accum = getAddExpr(Ops); 4674 4675 // The runtime checks will not be valid if the step amount is 4676 // varying inside the loop. 4677 if (!isLoopInvariant(Accum, L)) 4678 return None; 4679 4680 // *** Part2: Create the predicates 4681 4682 // Analysis was successful: we have a phi-with-cast pattern for which we 4683 // can return an AddRec expression under the following predicates: 4684 // 4685 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4686 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4687 // P2: An Equal predicate that guarantees that 4688 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4689 // P3: An Equal predicate that guarantees that 4690 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4691 // 4692 // As we next prove, the above predicates guarantee that: 4693 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4694 // 4695 // 4696 // More formally, we want to prove that: 4697 // Expr(i+1) = Start + (i+1) * Accum 4698 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4699 // 4700 // Given that: 4701 // 1) Expr(0) = Start 4702 // 2) Expr(1) = Start + Accum 4703 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4704 // 3) Induction hypothesis (step i): 4705 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4706 // 4707 // Proof: 4708 // Expr(i+1) = 4709 // = Start + (i+1)*Accum 4710 // = (Start + i*Accum) + Accum 4711 // = Expr(i) + Accum 4712 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4713 // :: from step i 4714 // 4715 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4716 // 4717 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4718 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4719 // + Accum :: from P3 4720 // 4721 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4722 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4723 // 4724 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4725 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4726 // 4727 // By induction, the same applies to all iterations 1<=i<n: 4728 // 4729 4730 // Create a truncated addrec for which we will add a no overflow check (P1). 4731 const SCEV *StartVal = getSCEV(StartValueV); 4732 const SCEV *PHISCEV = 4733 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4734 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4735 4736 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4737 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4738 // will be constant. 4739 // 4740 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4741 // add P1. 4742 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4743 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4744 Signed ? SCEVWrapPredicate::IncrementNSSW 4745 : SCEVWrapPredicate::IncrementNUSW; 4746 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4747 Predicates.push_back(AddRecPred); 4748 } 4749 4750 // Create the Equal Predicates P2,P3: 4751 4752 // It is possible that the predicates P2 and/or P3 are computable at 4753 // compile time due to StartVal and/or Accum being constants. 4754 // If either one is, then we can check that now and escape if either P2 4755 // or P3 is false. 4756 4757 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4758 // for each of StartVal and Accum 4759 auto getExtendedExpr = [&](const SCEV *Expr, 4760 bool CreateSignExtend) -> const SCEV * { 4761 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4762 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4763 const SCEV *ExtendedExpr = 4764 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4765                          : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4766     return ExtendedExpr;
4767   };
4768
4769   // Given:
4770   //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4771   //                = getExtendedExpr(Expr)
4772   // Determine whether the predicate P: Expr == ExtendedExpr
4773   // is known to be false at compile time
4774   auto PredIsKnownFalse = [&](const SCEV *Expr,
4775                               const SCEV *ExtendedExpr) -> bool {
4776     return Expr != ExtendedExpr &&
4777            isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4778   };
4779
4780   const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4781   if (PredIsKnownFalse(StartVal, StartExtended)) {
4782     LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4783     return None;
4784   }
4785
4786   // The Step is always Signed (because the overflow checks are either
4787   // NSSW or NUSW)
4788   const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4789   if (PredIsKnownFalse(Accum, AccumExtended)) {
4790     LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4791     return None;
4792   }
4793
4794   auto AppendPredicate = [&](const SCEV *Expr,
4795                              const SCEV *ExtendedExpr) -> void {
4796     if (Expr != ExtendedExpr &&
4797         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4798       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4799       LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4800       Predicates.push_back(Pred);
4801     }
4802   };
4803
4804   AppendPredicate(StartVal, StartExtended);
4805   AppendPredicate(Accum, AccumExtended);
4806
4807   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4808   // which the casts had been folded away. The caller can rewrite SymbolicPHI
4809   // into NewAR if it will also add the runtime overflow checks specified in
4810   // Predicates.
4811   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4812
4813   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4814       std::make_pair(NewAR, Predicates);
4815   // Remember the result of the analysis for this SCEV at this location.
4816   PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4817   return PredRewrite;
4818 }
4819
4820 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4821 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4822   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4823   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4824   if (!L)
4825     return None;
4826
4827   // Check to see if we already analyzed this PHI.
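  // Note: a prior failed analysis is cached by mapping the PHI's SCEV to
  // itself with an empty predicate list (see the failure path below); the
  // Rewrite.first == SymbolicPHI test is what tells the two cases apart.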
4828   auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4829   if (I != PredicatedSCEVRewrites.end()) {
4830     std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4831         I->second;
4832     // Analysis was done before and failed to create an AddRec:
4833     if (Rewrite.first == SymbolicPHI)
4834       return None;
4835     // Analysis was done before and succeeded in creating an AddRec under
4836     // a predicate:
4837     assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4838     assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4839     return Rewrite;
4840   }
4841
4842   Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4843       Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4844
4845   // Record in the cache that the analysis failed.
4846   if (!Rewrite) {
4847     SmallVector<const SCEVPredicate *, 3> Predicates;
4848     PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4849     return None;
4850   }
4851
4852   return Rewrite;
4853 }
4854
4855 // FIXME: This utility is currently required because the Rewriter currently
4856 // does not rewrite this expression:
4857 // {0, +, (sext ix (trunc iy to ix) to iy)}
4858 // into {0, +, %step},
4859 // even when the following Equal predicate exists:
4860 // "%step == (sext ix (trunc iy to ix) to iy)".
4861 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
4862     const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
4863   if (AR1 == AR2)
4864     return true;
4865
4866   auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
4867     if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4868         !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4869       return false;
4870     return true;
4871   };
4872
4873   if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
4874       !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
4875     return false;
4876   return true;
4877 }
4878
4879 /// A helper function for createAddRecFromPHI to handle simple cases.
4880 ///
4881 /// This function tries to find an AddRec expression for the simplest (yet most
4882 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4883 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4884 /// technique for finding the AddRec expression.
4885 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4886                                                       Value *BEValueV,
4887                                                       Value *StartValueV) {
4888   const Loop *L = LI.getLoopFor(PN->getParent());
4889   assert(L && L->getHeader() == PN->getParent());
4890   assert(BEValueV && StartValueV);
4891
4892   auto BO = MatchBinaryOp(BEValueV, DT);
4893   if (!BO)
4894     return nullptr;
4895
4896   if (BO->Opcode != Instruction::Add)
4897     return nullptr;
4898
4899   const SCEV *Accum = nullptr;
4900   if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4901     Accum = getSCEV(BO->RHS);
4902   else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4903     Accum = getSCEV(BO->LHS);
4904
4905   if (!Accum)
4906     return nullptr;
4907
4908   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4909   if (BO->IsNUW)
4910     Flags = setFlags(Flags, SCEV::FlagNUW);
4911   if (BO->IsNSW)
4912     Flags = setFlags(Flags, SCEV::FlagNSW);
4913
4914   const SCEV *StartVal = getSCEV(StartValueV);
4915   const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4916
4917   ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4918
4919   // We can add Flags to the post-inc expression only if we
4920   // know that it is *undefined behavior* for BEValueV to
4921   // overflow.
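  // (The getAddRecExpr call below is made purely for its caching side effect:
  // it constructs the post-increment expression {Start+Accum,+,Accum} with
  // the proven flags attached; the returned value itself is discarded.)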
4922   if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4923     if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4924       (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4925
4926   return PHISCEV;
4927 }
4928
4929 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4930   const Loop *L = LI.getLoopFor(PN->getParent());
4931   if (!L || L->getHeader() != PN->getParent())
4932     return nullptr;
4933
4934   // The loop may have multiple entrances or multiple exits; we can analyze
4935   // this phi as an addrec if it has a unique entry value and a unique
4936   // backedge value.
4937   Value *BEValueV = nullptr, *StartValueV = nullptr;
4938   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4939     Value *V = PN->getIncomingValue(i);
4940     if (L->contains(PN->getIncomingBlock(i))) {
4941       if (!BEValueV) {
4942         BEValueV = V;
4943       } else if (BEValueV != V) {
4944         BEValueV = nullptr;
4945         break;
4946       }
4947     } else if (!StartValueV) {
4948       StartValueV = V;
4949     } else if (StartValueV != V) {
4950       StartValueV = nullptr;
4951       break;
4952     }
4953   }
4954   if (!BEValueV || !StartValueV)
4955     return nullptr;
4956
4957   assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
4958          "PHI node already processed?");
4959
4960   // First, try to find an AddRec expression without creating a fictitious
4961   // symbolic value for PN.
4962   if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
4963     return S;
4964
4965   // Handle PHI node value symbolically.
4966   const SCEV *SymbolicName = getUnknown(PN);
4967   ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
4968
4969   // Using this symbolic name for the PHI, analyze the value coming around
4970   // the back-edge.
4971   const SCEV *BEValue = getSCEV(BEValueV);
4972
4973   // NOTE: If BEValue is loop invariant, we know that the PHI node just
4974   // has a special value for the first iteration of the loop.
4975
4976   // If the value coming around the backedge is an add with the symbolic
4977   // value we just inserted, then we found a simple induction variable!
4978   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
4979     // If there is a single occurrence of the symbolic value, replace it
4980     // with a recurrence.
4981     unsigned FoundIndex = Add->getNumOperands();
4982     for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4983       if (Add->getOperand(i) == SymbolicName)
4984         if (FoundIndex == e) {
4985           FoundIndex = i;
4986           break;
4987         }
4988
4989     if (FoundIndex != Add->getNumOperands()) {
4990       // Create an add with everything but the specified operand.
4991       SmallVector<const SCEV *, 8> Ops;
4992       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4993         if (i != FoundIndex)
4994           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
4995                                                              L, *this));
4996       const SCEV *Accum = getAddExpr(Ops);
4997
4998       // This is not a valid addrec if the step amount is varying each
4999       // loop iteration, but is not itself an addrec in this loop.
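      // (Illustrative example, not from the source: a polynomial recurrence
      // such as {0,+,{1,+,1}<%L>}<%L> is accepted here, since its step varies
      // per iteration but is itself an addrec in the same loop.)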
5000       if (isLoopInvariant(Accum, L) ||
5001           (isa<SCEVAddRecExpr>(Accum) &&
5002            cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5003         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5004
5005         if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5006           if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5007             if (BO->IsNUW)
5008               Flags = setFlags(Flags, SCEV::FlagNUW);
5009             if (BO->IsNSW)
5010               Flags = setFlags(Flags, SCEV::FlagNSW);
5011           }
5012         } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5013           // If the increment is an inbounds GEP, then we know the address
5014           // space cannot be wrapped around. We cannot make any guarantee
5015           // about signed or unsigned overflow because pointers are
5016           // unsigned but we may have a negative index from the base
5017           // pointer. We can guarantee that no unsigned wrap occurs if the
5018           // indices form a positive value.
5019           if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5020             Flags = setFlags(Flags, SCEV::FlagNW);
5021
5022             const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5023             if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5024               Flags = setFlags(Flags, SCEV::FlagNUW);
5025           }
5026
5027           // We cannot transfer nuw and nsw flags from subtraction
5028           // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5029           // for instance.
5030         }
5031
5032         const SCEV *StartVal = getSCEV(StartValueV);
5033         const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5034
5035         // Okay, for the entire analysis of this edge we assumed the PHI
5036         // to be symbolic. We now need to go back and purge all of the
5037         // entries for the scalars that use the symbolic expression.
5038         forgetSymbolicName(PN, SymbolicName);
5039         ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5040
5041         // We can add Flags to the post-inc expression only if we
5042         // know that it is *undefined behavior* for BEValueV to
5043         // overflow.
5044         if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5045           if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5046             (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5047
5048         return PHISCEV;
5049       }
5050     }
5051   } else {
5052     // Otherwise, this could be a loop like this:
5053     //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
5054     // In this case, j = {1,+,1} and BEValue is j.
5055     // Because the other in-value of i (0) fits the evolution of BEValue,
5056     // i really is an addrec evolution.
5057     //
5058     // We can generalize this saying that i is the shifted value of BEValue
5059     // by one iteration:
5060     //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
5061     const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5062     const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5063     if (Shifted != getCouldNotCompute() &&
5064         Start != getCouldNotCompute()) {
5065       const SCEV *StartVal = getSCEV(StartValueV);
5066       if (Start == StartVal) {
5067         // Okay, for the entire analysis of this edge we assumed the PHI
5068         // to be symbolic. We now need to go back and purge all of the
5069         // entries for the scalars that use the symbolic expression.
5070         forgetSymbolicName(PN, SymbolicName);
5071         ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5072         return Shifted;
5073       }
5074     }
5075   }
5076
5077   // Remove the temporary PHI node SCEV that has been inserted while intending
5078   // to create an AddRecExpr for this PHI node. We cannot keep this temporary
5079   // entry, as it would prevent later (possibly simpler) SCEV expressions from
5080   // being added to the ValueExprMap.
5081   eraseValueFromMap(PN);
5082
5083   return nullptr;
5084 }
5085
5086 // Checks if the SCEV S is available at BB. S is considered available at BB
5087 // if S can be materialized at BB without introducing a fault.
5088 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5089                                BasicBlock *BB) {
5090   struct CheckAvailable {
5091     bool TraversalDone = false;
5092     bool Available = true;
5093
5094     const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5095     BasicBlock *BB = nullptr;
5096     DominatorTree &DT;
5097
5098     CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5099         : L(L), BB(BB), DT(DT) {}
5100
5101     bool setUnavailable() {
5102       TraversalDone = true;
5103       Available = false;
5104       return false;
5105     }
5106
5107     bool follow(const SCEV *S) {
5108       switch (S->getSCEVType()) {
5109       case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
5110       case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
5111         // These expressions are available if their operand(s) is/are.
5112         return true;
5113
5114       case scAddRecExpr: {
5115         // We allow add recurrences that are in the loop BB is in, or in some
5116         // outer loop.  This guarantees availability because the value of the
5117         // add recurrence at BB is simply the "current" value of the induction
5118         // variable.  We can relax this in the future; for instance an add
5119         // recurrence on a sibling dominating loop is also available at BB.
5120         const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5121         if (L && (ARLoop == L || ARLoop->contains(L)))
5122           return true;
5123
5124         return setUnavailable();
5125       }
5126
5127       case scUnknown: {
5128         // For SCEVUnknown, we check for simple dominance.
5129         const auto *SU = cast<SCEVUnknown>(S);
5130         Value *V = SU->getValue();
5131
5132         if (isa<Argument>(V))
5133           return false;
5134
5135         if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5136           return false;
5137
5138         return setUnavailable();
5139       }
5140
5141       case scUDivExpr:
5142       case scCouldNotCompute:
5143         // We do not try to be smart about these at all.
5144         return setUnavailable();
5145       }
5146       llvm_unreachable("switch should be fully covered!");
5147     }
5148
5149     bool isDone() { return TraversalDone; }
5150   };
5151
5152   CheckAvailable CA(L, BB, DT);
5153   SCEVTraversal<CheckAvailable> ST(CA);
5154
5155   ST.visitAll(S);
5156   return CA.Available;
5157 }
5158
5159 // Try to match a control flow sequence that branches out at BI and merges back
5160 // at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
5161 // match.
5162 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5163 Value *&C, Value *&LHS, Value *&RHS) { 5164 C = BI->getCondition(); 5165 5166 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5167 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5168 5169 if (!LeftEdge.isSingleEdge()) 5170 return false; 5171 5172 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5173 5174 Use &LeftUse = Merge->getOperandUse(0); 5175 Use &RightUse = Merge->getOperandUse(1); 5176 5177 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5178 LHS = LeftUse; 5179 RHS = RightUse; 5180 return true; 5181 } 5182 5183 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5184 LHS = RightUse; 5185 RHS = LeftUse; 5186 return true; 5187 } 5188 5189 return false; 5190 } 5191 5192 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5193 auto IsReachable = 5194 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5195 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5196 const Loop *L = LI.getLoopFor(PN->getParent()); 5197 5198 // We don't want to break LCSSA, even in a SCEV expression tree. 5199 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5200 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5201 return nullptr; 5202 5203 // Try to match 5204 // 5205 // br %cond, label %left, label %right 5206 // left: 5207 // br label %merge 5208 // right: 5209 // br label %merge 5210 // merge: 5211 // V = phi [ %x, %left ], [ %y, %right ] 5212 // 5213 // as "select %cond, %x, %y" 5214 5215 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5216 assert(IDom && "At least the entry block should dominate PN"); 5217 5218 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5219 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5220 5221 if (BI && BI->isConditional() && 5222 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5223 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5224 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5225 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5226 } 5227 5228 return nullptr; 5229 } 5230 5231 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5232 if (const SCEV *S = createAddRecFromPHI(PN)) 5233 return S; 5234 5235 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5236 return S; 5237 5238 // If the PHI has a single incoming value, follow that value, unless the 5239 // PHI's incoming blocks are in a different loop, in which case doing so 5240 // risks breaking LCSSA form. Instcombine would normally zap these, but 5241 // it doesn't have DominatorTree information, so it may miss cases. 5242 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5243 if (LI.replacementPreservesLCSSAForm(PN, V)) 5244 return getSCEV(V); 5245 5246 // If it's not a loop phi, we can't handle it yet. 5247 return getUnknown(PN); 5248 } 5249 5250 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5251 Value *Cond, 5252 Value *TrueVal, 5253 Value *FalseVal) { 5254 // Handle "constant" branch or select. This can occur for instance when a 5255 // loop pass transforms an inner loop and moves on to process the outer loop. 5256 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5257 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5258 5259 // Try to match some simple smax or umax patterns. 
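  // For example, "%a >s %b ? %a : %b" is smax(%a, %b). The cases below also
  // tolerate a common additive offset, so "%a >s %b ? %a+x : %b+x" becomes
  // smax(%a, %b)+x; the LDiff == RDiff comparisons detect that shared x.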
5260 auto *ICI = dyn_cast<ICmpInst>(Cond); 5261 if (!ICI) 5262 return getUnknown(I); 5263 5264 Value *LHS = ICI->getOperand(0); 5265 Value *RHS = ICI->getOperand(1); 5266 5267 switch (ICI->getPredicate()) { 5268 case ICmpInst::ICMP_SLT: 5269 case ICmpInst::ICMP_SLE: 5270 std::swap(LHS, RHS); 5271 LLVM_FALLTHROUGH; 5272 case ICmpInst::ICMP_SGT: 5273 case ICmpInst::ICMP_SGE: 5274 // a >s b ? a+x : b+x -> smax(a, b)+x 5275 // a >s b ? b+x : a+x -> smin(a, b)+x 5276 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5277 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5278 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5279 const SCEV *LA = getSCEV(TrueVal); 5280 const SCEV *RA = getSCEV(FalseVal); 5281 const SCEV *LDiff = getMinusSCEV(LA, LS); 5282 const SCEV *RDiff = getMinusSCEV(RA, RS); 5283 if (LDiff == RDiff) 5284 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5285 LDiff = getMinusSCEV(LA, RS); 5286 RDiff = getMinusSCEV(RA, LS); 5287 if (LDiff == RDiff) 5288 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5289 } 5290 break; 5291 case ICmpInst::ICMP_ULT: 5292 case ICmpInst::ICMP_ULE: 5293 std::swap(LHS, RHS); 5294 LLVM_FALLTHROUGH; 5295 case ICmpInst::ICMP_UGT: 5296 case ICmpInst::ICMP_UGE: 5297 // a >u b ? a+x : b+x -> umax(a, b)+x 5298 // a >u b ? b+x : a+x -> umin(a, b)+x 5299 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5300 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5301 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5302 const SCEV *LA = getSCEV(TrueVal); 5303 const SCEV *RA = getSCEV(FalseVal); 5304 const SCEV *LDiff = getMinusSCEV(LA, LS); 5305 const SCEV *RDiff = getMinusSCEV(RA, RS); 5306 if (LDiff == RDiff) 5307 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5308 LDiff = getMinusSCEV(LA, RS); 5309 RDiff = getMinusSCEV(RA, LS); 5310 if (LDiff == RDiff) 5311 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5312 } 5313 break; 5314 case ICmpInst::ICMP_NE: 5315 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5316 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5317 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5318 const SCEV *One = getOne(I->getType()); 5319 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5320 const SCEV *LA = getSCEV(TrueVal); 5321 const SCEV *RA = getSCEV(FalseVal); 5322 const SCEV *LDiff = getMinusSCEV(LA, LS); 5323 const SCEV *RDiff = getMinusSCEV(RA, One); 5324 if (LDiff == RDiff) 5325 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5326 } 5327 break; 5328 case ICmpInst::ICMP_EQ: 5329 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5330 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5331 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5332 const SCEV *One = getOne(I->getType()); 5333 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5334 const SCEV *LA = getSCEV(TrueVal); 5335 const SCEV *RA = getSCEV(FalseVal); 5336 const SCEV *LDiff = getMinusSCEV(LA, One); 5337 const SCEV *RDiff = getMinusSCEV(RA, LS); 5338 if (LDiff == RDiff) 5339 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5340 } 5341 break; 5342 default: 5343 break; 5344 } 5345 5346 return getUnknown(I); 5347 } 5348 5349 /// Expand GEP instructions into add and multiply operations. This allows them 5350 /// to be analyzed by regular SCEV code. 5351 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5352 // Don't attempt to analyze GEPs over unsized objects. 
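  // (An unsized source element type, e.g. an opaque struct, has no DataLayout
  // size, so there are no concrete offsets to expand the GEP into.)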
5353   if (!GEP->getSourceElementType()->isSized())
5354     return getUnknown(GEP);
5355
5356   SmallVector<const SCEV *, 4> IndexExprs;
5357   for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
5358     IndexExprs.push_back(getSCEV(*Index));
5359   return getGEPExpr(GEP, IndexExprs);
5360 }
5361
5362 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5363   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5364     return C->getAPInt().countTrailingZeros();
5365
5366   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5367     return std::min(GetMinTrailingZeros(T->getOperand()),
5368                     (uint32_t)getTypeSizeInBits(T->getType()));
5369
5370   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5371     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5372     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5373                ? getTypeSizeInBits(E->getType())
5374                : OpRes;
5375   }
5376
5377   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5378     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5379     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5380                ? getTypeSizeInBits(E->getType())
5381                : OpRes;
5382   }
5383
5384   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5385     // The result is the min of all operands' results.
5386     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5387     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5388       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5389     return MinOpRes;
5390   }
5391
5392   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5393     // The result is the sum of all operands' results.
5394     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5395     uint32_t BitWidth = getTypeSizeInBits(M->getType());
5396     for (unsigned i = 1, e = M->getNumOperands();
5397          SumOpRes != BitWidth && i != e; ++i)
5398       SumOpRes =
5399           std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5400     return SumOpRes;
5401   }
5402
5403   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5404     // The result is the min of all operands' results.
5405     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5406     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5407       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5408     return MinOpRes;
5409   }
5410
5411   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5412     // The result is the min of all operands' results.
5413     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5414     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5415       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5416     return MinOpRes;
5417   }
5418
5419   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5420     // The result is the min of all operands' results.
5421     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5422     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5423       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5424     return MinOpRes;
5425   }
5426
5427   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5428     // For a SCEVUnknown, ask ValueTracking.
5429 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5430 return Known.countMinTrailingZeros(); 5431 } 5432 5433 // SCEVUDivExpr 5434 return 0; 5435 } 5436 5437 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5438 auto I = MinTrailingZerosCache.find(S); 5439 if (I != MinTrailingZerosCache.end()) 5440 return I->second; 5441 5442 uint32_t Result = GetMinTrailingZerosImpl(S); 5443 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5444 assert(InsertPair.second && "Should insert a new key"); 5445 return InsertPair.first->second; 5446 } 5447 5448 /// Helper method to assign a range to V from metadata present in the IR. 5449 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5450 if (Instruction *I = dyn_cast<Instruction>(V)) 5451 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5452 return getConstantRangeFromMetadata(*MD); 5453 5454 return None; 5455 } 5456 5457 /// Determine the range for a particular SCEV. If SignHint is 5458 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5459 /// with a "cleaner" unsigned (resp. signed) representation. 5460 const ConstantRange & 5461 ScalarEvolution::getRangeRef(const SCEV *S, 5462 ScalarEvolution::RangeSignHint SignHint) { 5463 DenseMap<const SCEV *, ConstantRange> &Cache = 5464 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5465 : SignedRanges; 5466 5467 // See if we've computed this range already. 5468 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5469 if (I != Cache.end()) 5470 return I->second; 5471 5472 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5473 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5474 5475 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5476 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5477 5478 // If the value has known zeros, the maximum value will have those known zeros 5479 // as well. 
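  // Worked example (illustrative): with BitWidth == 8 and TZ == 2, the
  // unsigned case below yields [0, 0xFD), i.e. a maximum of 0xFC, the largest
  // 8-bit value whose two low bits are clear.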
5480 uint32_t TZ = GetMinTrailingZeros(S); 5481 if (TZ != 0) { 5482 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5483 ConservativeResult = 5484 ConstantRange(APInt::getMinValue(BitWidth), 5485 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5486 else 5487 ConservativeResult = ConstantRange( 5488 APInt::getSignedMinValue(BitWidth), 5489 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5490 } 5491 5492 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5493 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5494 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5495 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5496 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5497 } 5498 5499 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5500 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5501 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5502 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5503 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5504 } 5505 5506 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5507 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5508 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5509 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5510 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5511 } 5512 5513 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5514 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5515 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5516 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5517 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5518 } 5519 5520 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5521 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5522 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5523 return setRange(UDiv, SignHint, 5524 ConservativeResult.intersectWith(X.udiv(Y))); 5525 } 5526 5527 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5528 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5529 return setRange(ZExt, SignHint, 5530 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5531 } 5532 5533 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5534 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5535 return setRange(SExt, SignHint, 5536 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5537 } 5538 5539 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5540 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5541 return setRange(Trunc, SignHint, 5542 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5543 } 5544 5545 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5546 // If there's no unsigned wrap, the value will never be less than its 5547 // initial value. 5548 if (AddRec->hasNoUnsignedWrap()) 5549 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5550 if (!C->getValue()->isZero()) 5551 ConservativeResult = ConservativeResult.intersectWith( 5552 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5553 5554 // If there's no signed wrap, and all the operands have the same sign or 5555 // zero, the value won't ever change sign. 
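    // E.g. {1,+,2}<nsw> has all operands non-negative, so it stays within
    // [0, SINT_MAX]; symmetrically, an all-non-positive addrec stays within
    // [SINT_MIN, 0].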
5556     if (AddRec->hasNoSignedWrap()) {
5557       bool AllNonNeg = true;
5558       bool AllNonPos = true;
5559       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5560         if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
5561         if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
5562       }
5563       if (AllNonNeg)
5564         ConservativeResult = ConservativeResult.intersectWith(
5565             ConstantRange(APInt(BitWidth, 0),
5566                           APInt::getSignedMinValue(BitWidth)));
5567       else if (AllNonPos)
5568         ConservativeResult = ConservativeResult.intersectWith(
5569             ConstantRange(APInt::getSignedMinValue(BitWidth),
5570                           APInt(BitWidth, 1)));
5571     }
5572
5573     // TODO: non-affine addrec
5574     if (AddRec->isAffine()) {
5575       const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
5576       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
5577           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
5578         auto RangeFromAffine = getRangeForAffineAR(
5579             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
5580             BitWidth);
5581         if (!RangeFromAffine.isFullSet())
5582           ConservativeResult =
5583               ConservativeResult.intersectWith(RangeFromAffine);
5584
5585         auto RangeFromFactoring = getRangeViaFactoring(
5586             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
5587             BitWidth);
5588         if (!RangeFromFactoring.isFullSet())
5589           ConservativeResult =
5590               ConservativeResult.intersectWith(RangeFromFactoring);
5591       }
5592     }
5593
5594     return setRange(AddRec, SignHint, std::move(ConservativeResult));
5595   }
5596
5597   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5598     // Check if the IR explicitly contains !range metadata.
5599     Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
5600     if (MDRange.hasValue())
5601       ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());
5602
5603     // Split here to avoid paying the compile-time cost of calling both
5604     // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
5605     // if needed.
5606     const DataLayout &DL = getDataLayout();
5607     if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
5608       // For a SCEVUnknown, ask ValueTracking.
5609       KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5610       if (Known.One != ~Known.Zero + 1)
5611         ConservativeResult =
5612             ConservativeResult.intersectWith(ConstantRange(Known.One,
5613                                                            ~Known.Zero + 1));
5614     } else {
5615       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
5616              "generalize as needed!");
5617       unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5618       if (NS > 1)
5619         ConservativeResult = ConservativeResult.intersectWith(
5620             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
5621                           APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
5622     }
5623
5624     // The range of a phi is a subset of the union of the ranges of its inputs.
5625     if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
5626       // Make sure that we do not run over cyclic phis.
5627       if (PendingPhiRanges.insert(Phi).second) {
5628         ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
5629         for (auto &Op : Phi->operands()) {
5630           auto OpRange = getRangeRef(getSCEV(Op), SignHint);
5631           RangeFromOps = RangeFromOps.unionWith(OpRange);
5632           // No point in continuing if we already have a full set.
5633           if (RangeFromOps.isFullSet())
5634             break;
5635         }
5636         ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
5637         bool Erased = PendingPhiRanges.erase(Phi);
5638         assert(Erased && "Failed to erase Phi properly?");
5639         (void) Erased;
5640       }
5641     }
5642
5643     return setRange(U, SignHint, std::move(ConservativeResult));
5644   }
5645
5646   return setRange(S, SignHint, std::move(ConservativeResult));
5647 }
5648
5649 // Given a StartRange, Step and MaxBECount for an expression compute a range of
5650 // values that the expression can take. Initially, the expression has a value
5651 // from StartRange and then is changed by Step up to MaxBECount times. Signed
5652 // argument defines if we treat Step as signed or unsigned.
5653 static ConstantRange getRangeForAffineARHelper(APInt Step,
5654                                                const ConstantRange &StartRange,
5655                                                const APInt &MaxBECount,
5656                                                unsigned BitWidth, bool Signed) {
5657   // If either Step or MaxBECount is 0, then the expression won't change, and we
5658   // just need to return the initial range.
5659   if (Step == 0 || MaxBECount == 0)
5660     return StartRange;
5661
5662   // If we don't know anything about the initial value (i.e. StartRange is
5663   // FullRange), then we don't know anything about the final range either.
5664   // Return FullRange.
5665   if (StartRange.isFullSet())
5666     return ConstantRange(BitWidth, /* isFullSet = */ true);
5667
5668   // If Step is signed and negative, then we use its absolute value, but we also
5669   // note that we're moving in the opposite direction.
5670   bool Descending = Signed && Step.isNegative();
5671
5672   if (Signed)
5673     // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5674     // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5675     // These equations hold true due to the well-defined wrap-around behavior of
5676     // APInt.
5677     Step = Step.abs();
5678
5679   // Check if Offset is more than the full span of BitWidth. If it is, the
5680   // expression is guaranteed to overflow.
5681   if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5682     return ConstantRange(BitWidth, /* isFullSet = */ true);
5683
5684   // Offset is by how much the expression can change. Checks above guarantee no
5685   // overflow here.
5686   APInt Offset = Step * MaxBECount;
5687
5688   // Minimum value of the final range will match the minimal value of StartRange
5689   // if the expression is increasing and will be decreased by Offset otherwise.
5690   // Maximum value of the final range will match the maximal value of StartRange
5691   // if the expression is decreasing and will be increased by Offset otherwise.
5692   APInt StartLower = StartRange.getLower();
5693   APInt StartUpper = StartRange.getUpper() - 1;
5694   APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5695                                    : (StartUpper + std::move(Offset));
5696
5697   // It's possible that the new minimum/maximum value will fall into the initial
5698   // range (due to wrap around). This means that the expression can take any
5699   // value in this bitwidth, and we have to return full range.
5700   if (StartRange.contains(MovedBoundary))
5701     return ConstantRange(BitWidth, /* isFullSet = */ true);
5702
5703   APInt NewLower =
5704       Descending ? std::move(MovedBoundary) : std::move(StartLower);
5705   APInt NewUpper =
5706       Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5707   NewUpper += 1;
5708
5709   // If we end up with full range, return a proper full range.
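  // (In ConstantRange, equal bounds are reserved to encode the empty and full
  // sets, so the wrapped-all-the-way-around case is normalized to an explicit
  // full range here rather than passed to the two-APInt constructor.)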
5710 if (NewLower == NewUpper) 5711 return ConstantRange(BitWidth, /* isFullSet = */ true); 5712 5713 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5714 return ConstantRange(std::move(NewLower), std::move(NewUpper)); 5715 } 5716 5717 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5718 const SCEV *Step, 5719 const SCEV *MaxBECount, 5720 unsigned BitWidth) { 5721 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5722 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5723 "Precondition!"); 5724 5725 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5726 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5727 5728 // First, consider step signed. 5729 ConstantRange StartSRange = getSignedRange(Start); 5730 ConstantRange StepSRange = getSignedRange(Step); 5731 5732 // If Step can be both positive and negative, we need to find ranges for the 5733 // maximum absolute step values in both directions and union them. 5734 ConstantRange SR = 5735 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5736 MaxBECountValue, BitWidth, /* Signed = */ true); 5737 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5738 StartSRange, MaxBECountValue, 5739 BitWidth, /* Signed = */ true)); 5740 5741 // Next, consider step unsigned. 5742 ConstantRange UR = getRangeForAffineARHelper( 5743 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5744 MaxBECountValue, BitWidth, /* Signed = */ false); 5745 5746 // Finally, intersect signed and unsigned ranges. 5747 return SR.intersectWith(UR); 5748 } 5749 5750 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5751 const SCEV *Step, 5752 const SCEV *MaxBECount, 5753 unsigned BitWidth) { 5754 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5755 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5756 5757 struct SelectPattern { 5758 Value *Condition = nullptr; 5759 APInt TrueValue; 5760 APInt FalseValue; 5761 5762 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5763 const SCEV *S) { 5764 Optional<unsigned> CastOp; 5765 APInt Offset(BitWidth, 0); 5766 5767 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5768 "Should be!"); 5769 5770 // Peel off a constant offset: 5771 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5772 // In the future we could consider being smarter here and handle 5773 // {Start+Step,+,Step} too. 
5774 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5775 return; 5776 5777 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5778 S = SA->getOperand(1); 5779 } 5780 5781 // Peel off a cast operation 5782 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5783 CastOp = SCast->getSCEVType(); 5784 S = SCast->getOperand(); 5785 } 5786 5787 using namespace llvm::PatternMatch; 5788 5789 auto *SU = dyn_cast<SCEVUnknown>(S); 5790 const APInt *TrueVal, *FalseVal; 5791 if (!SU || 5792 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5793 m_APInt(FalseVal)))) { 5794 Condition = nullptr; 5795 return; 5796 } 5797 5798 TrueValue = *TrueVal; 5799 FalseValue = *FalseVal; 5800 5801 // Re-apply the cast we peeled off earlier 5802 if (CastOp.hasValue()) 5803 switch (*CastOp) { 5804 default: 5805 llvm_unreachable("Unknown SCEV cast type!"); 5806 5807 case scTruncate: 5808 TrueValue = TrueValue.trunc(BitWidth); 5809 FalseValue = FalseValue.trunc(BitWidth); 5810 break; 5811 case scZeroExtend: 5812 TrueValue = TrueValue.zext(BitWidth); 5813 FalseValue = FalseValue.zext(BitWidth); 5814 break; 5815 case scSignExtend: 5816 TrueValue = TrueValue.sext(BitWidth); 5817 FalseValue = FalseValue.sext(BitWidth); 5818 break; 5819 } 5820 5821 // Re-apply the constant offset we peeled off earlier 5822 TrueValue += Offset; 5823 FalseValue += Offset; 5824 } 5825 5826 bool isRecognized() { return Condition != nullptr; } 5827 }; 5828 5829 SelectPattern StartPattern(*this, BitWidth, Start); 5830 if (!StartPattern.isRecognized()) 5831 return ConstantRange(BitWidth, /* isFullSet = */ true); 5832 5833 SelectPattern StepPattern(*this, BitWidth, Step); 5834 if (!StepPattern.isRecognized()) 5835 return ConstantRange(BitWidth, /* isFullSet = */ true); 5836 5837 if (StartPattern.Condition != StepPattern.Condition) { 5838 // We don't handle this case today; but we could, by considering four 5839 // possibilities below instead of two. I'm not sure if there are cases where 5840 // that will help over what getRange already does, though. 5841 return ConstantRange(BitWidth, /* isFullSet = */ true); 5842 } 5843 5844 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5845 // construct arbitrary general SCEV expressions here. This function is called 5846 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5847 // say) can end up caching a suboptimal value. 5848 5849 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5850 // C2352 and C2512 (otherwise it isn't needed). 5851 5852 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5853 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5854 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5855 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5856 5857 ConstantRange TrueRange = 5858 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5859 ConstantRange FalseRange = 5860 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5861 5862 return TrueRange.unionWith(FalseRange); 5863 } 5864 5865 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5866 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5867 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5868 5869 // Return early if there are no flags to propagate to the SCEV. 
5870 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5871 if (BinOp->hasNoUnsignedWrap()) 5872 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5873 if (BinOp->hasNoSignedWrap()) 5874 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5875 if (Flags == SCEV::FlagAnyWrap) 5876 return SCEV::FlagAnyWrap; 5877 5878 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5879 } 5880 5881 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5882 // Here we check that I is in the header of the innermost loop containing I, 5883 // since we only deal with instructions in the loop header. The actual loop we 5884 // need to check later will come from an add recurrence, but getting that 5885 // requires computing the SCEV of the operands, which can be expensive. This 5886 // check we can do cheaply to rule out some cases early. 5887 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5888 if (InnermostContainingLoop == nullptr || 5889 InnermostContainingLoop->getHeader() != I->getParent()) 5890 return false; 5891 5892 // Only proceed if we can prove that I does not yield poison. 5893 if (!programUndefinedIfFullPoison(I)) 5894 return false; 5895 5896 // At this point we know that if I is executed, then it does not wrap 5897 // according to at least one of NSW or NUW. If I is not executed, then we do 5898 // not know if the calculation that I represents would wrap. Multiple 5899 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5900 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5901 // derived from other instructions that map to the same SCEV. We cannot make 5902 // that guarantee for cases where I is not executed. So we need to find the 5903 // loop that I is considered in relation to and prove that I is executed for 5904 // every iteration of that loop. That implies that the value that I 5905 // calculates does not wrap anywhere in the loop, so then we can apply the 5906 // flags to the SCEV. 5907 // 5908 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5909 // from different loops, so that we know which loop to prove that I is 5910 // executed in. 5911 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5912 // I could be an extractvalue from a call to an overflow intrinsic. 5913 // TODO: We can do better here in some cases. 5914 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 5915 return false; 5916 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 5917 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 5918 bool AllOtherOpsLoopInvariant = true; 5919 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 5920 ++OtherOpIndex) { 5921 if (OtherOpIndex != OpIndex) { 5922 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 5923 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 5924 AllOtherOpsLoopInvariant = false; 5925 break; 5926 } 5927 } 5928 } 5929 if (AllOtherOpsLoopInvariant && 5930 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 5931 return true; 5932 } 5933 } 5934 return false; 5935 } 5936 5937 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 5938 // If we know that \c I can never be poison period, then that's enough. 
5939   if (isSCEVExprNeverPoison(I))
5940     return true;
5941
5942   // For an add recurrence specifically, we assume that infinite loops without
5943   // side effects are undefined behavior, and then reason as follows:
5944   //
5945   // If the add recurrence is poison in any iteration, it is poison on all
5946   // future iterations (since incrementing poison yields poison). If the result
5947   // of the add recurrence is fed into the loop latch condition and the loop
5948   // does not contain any throws or exiting blocks other than the latch, we now
5949   // have the ability to "choose" whether the backedge is taken or not (by
5950   // choosing a sufficiently evil value for the poison feeding into the branch)
5951   // for every iteration including and after the one in which \p I first became
5952   // poison. There are two possibilities (let K be the iteration in
5953   // which \p I first became poison):
5954   //
5955   // 1. In the set of iterations including and after K, the loop body executes
5956   //    no side effects. In this case executing the backedge an infinite number
5957   //    of times will yield undefined behavior.
5958   //
5959   // 2. In the set of iterations including and after K, the loop body executes
5960   //    at least one side effect. In this case, that specific instance of side
5961   //    effect is control dependent on poison, which also yields undefined
5962   //    behavior.
5963
5964   auto *ExitingBB = L->getExitingBlock();
5965   auto *LatchBB = L->getLoopLatch();
5966   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5967     return false;
5968
5969   SmallPtrSet<const Instruction *, 16> Pushed;
5970   SmallVector<const Instruction *, 8> PoisonStack;
5971
5972   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5973   // things that are known to be fully poison under that assumption go on the
5974   // PoisonStack.
5975   Pushed.insert(I);
5976   PoisonStack.push_back(I);
5977
5978   bool LatchControlDependentOnPoison = false;
5979   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
5980     const Instruction *Poison = PoisonStack.pop_back_val();
5981
5982     for (auto *PoisonUser : Poison->users()) {
5983       if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
5984         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
5985           PoisonStack.push_back(cast<Instruction>(PoisonUser));
5986       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
5987         assert(BI->isConditional() && "Only possibility!");
5988         if (BI->getParent() == LatchBB) {
5989           LatchControlDependentOnPoison = true;
5990           break;
5991         }
5992       }
5993     }
5994   }
5995
5996   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
5997 }
5998
5999 ScalarEvolution::LoopProperties
6000 ScalarEvolution::getLoopProperties(const Loop *L) {
6001   using LoopProperties = ScalarEvolution::LoopProperties;
6002
6003   auto Itr = LoopPropertiesCache.find(L);
6004   if (Itr == LoopPropertiesCache.end()) {
6005     auto HasSideEffects = [](Instruction *I) {
6006       if (auto *SI = dyn_cast<StoreInst>(I))
6007         return !SI->isSimple();
6008
6009       return I->mayHaveSideEffects();
6010     };
6011
6012     LoopProperties LP = {/* HasNoAbnormalExits */ true,
6013                          /* HasNoSideEffects */ true};
6014
6015     for (auto *BB : L->getBlocks())
6016       for (auto &I : *BB) {
6017         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6018           LP.HasNoAbnormalExits = false;
6019         if (HasSideEffects(&I))
6020           LP.HasNoSideEffects = false;
6021         if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6022           break; // We're already as pessimistic as we can get.
6023 } 6024 6025 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6026 assert(InsertPair.second && "We just checked!"); 6027 Itr = InsertPair.first; 6028 } 6029 6030 return Itr->second; 6031 } 6032 6033 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6034 if (!isSCEVable(V->getType())) 6035 return getUnknown(V); 6036 6037 if (Instruction *I = dyn_cast<Instruction>(V)) { 6038 // Don't attempt to analyze instructions in blocks that aren't 6039 // reachable. Such instructions don't matter, and they aren't required 6040 // to obey basic rules for definitions dominating uses which this 6041 // analysis depends on. 6042 if (!DT.isReachableFromEntry(I->getParent())) 6043 return getUnknown(V); 6044 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6045 return getConstant(CI); 6046 else if (isa<ConstantPointerNull>(V)) 6047 return getZero(V->getType()); 6048 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6049 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6050 else if (!isa<ConstantExpr>(V)) 6051 return getUnknown(V); 6052 6053 Operator *U = cast<Operator>(V); 6054 if (auto BO = MatchBinaryOp(U, DT)) { 6055 switch (BO->Opcode) { 6056 case Instruction::Add: { 6057 // The simple thing to do would be to just call getSCEV on both operands 6058 // and call getAddExpr with the result. However if we're looking at a 6059 // bunch of things all added together, this can be quite inefficient, 6060 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6061 // Instead, gather up all the operands and make a single getAddExpr call. 6062 // LLVM IR canonical form means we need only traverse the left operands. 6063 SmallVector<const SCEV *, 4> AddOps; 6064 do { 6065 if (BO->Op) { 6066 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6067 AddOps.push_back(OpSCEV); 6068 break; 6069 } 6070 6071 // If a NUW or NSW flag can be applied to the SCEV for this 6072 // addition, then compute the SCEV for this addition by itself 6073 // with a separate call to getAddExpr. We need to do that 6074 // instead of pushing the operands of the addition onto AddOps, 6075 // since the flags are only known to apply to this particular 6076 // addition - they may not apply to other additions that can be 6077 // formed with operands from AddOps. 
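// For instance, given (a +nsw b) + c, the nsw flag is only known to hold
// for the inner a + b; folding everything into one three-operand add could
// not soundly carry the flag over to the combined addition.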
6078 const SCEV *RHS = getSCEV(BO->RHS); 6079 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6080 if (Flags != SCEV::FlagAnyWrap) { 6081 const SCEV *LHS = getSCEV(BO->LHS); 6082 if (BO->Opcode == Instruction::Sub) 6083 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6084 else 6085 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6086 break; 6087 } 6088 } 6089 6090 if (BO->Opcode == Instruction::Sub) 6091 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6092 else 6093 AddOps.push_back(getSCEV(BO->RHS)); 6094 6095 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6096 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6097 NewBO->Opcode != Instruction::Sub)) { 6098 AddOps.push_back(getSCEV(BO->LHS)); 6099 break; 6100 } 6101 BO = NewBO; 6102 } while (true); 6103 6104 return getAddExpr(AddOps); 6105 } 6106 6107 case Instruction::Mul: { 6108 SmallVector<const SCEV *, 4> MulOps; 6109 do { 6110 if (BO->Op) { 6111 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6112 MulOps.push_back(OpSCEV); 6113 break; 6114 } 6115 6116 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6117 if (Flags != SCEV::FlagAnyWrap) { 6118 MulOps.push_back( 6119 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6120 break; 6121 } 6122 } 6123 6124 MulOps.push_back(getSCEV(BO->RHS)); 6125 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6126 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6127 MulOps.push_back(getSCEV(BO->LHS)); 6128 break; 6129 } 6130 BO = NewBO; 6131 } while (true); 6132 6133 return getMulExpr(MulOps); 6134 } 6135 case Instruction::UDiv: 6136 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6137 case Instruction::URem: 6138 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6139 case Instruction::Sub: { 6140 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6141 if (BO->Op) 6142 Flags = getNoWrapFlagsFromUB(BO->Op); 6143 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6144 } 6145 case Instruction::And: 6146 // For an expression like x&255 that merely masks off the high bits, 6147 // use zext(trunc(x)) as the SCEV expression. 6148 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6149 if (CI->isZero()) 6150 return getSCEV(BO->RHS); 6151 if (CI->isMinusOne()) 6152 return getSCEV(BO->LHS); 6153 const APInt &A = CI->getValue(); 6154 6155 // Instcombine's ShrinkDemandedConstant may strip bits out of 6156 // constants, obscuring what would otherwise be a low-bits mask. 6157 // Use computeKnownBits to compute what ShrinkDemandedConstant 6158 // knew about to reconstruct a low-bits mask value. 6159 unsigned LZ = A.countLeadingZeros(); 6160 unsigned TZ = A.countTrailingZeros(); 6161 unsigned BitWidth = A.getBitWidth(); 6162 KnownBits Known(BitWidth); 6163 computeKnownBits(BO->LHS, Known, getDataLayout(), 6164 0, &AC, nullptr, &DT); 6165 6166 APInt EffectiveMask = 6167 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6168 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6169 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6170 const SCEV *LHS = getSCEV(BO->LHS); 6171 const SCEV *ShiftedLHS = nullptr; 6172 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6173 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6174 // For an expression like (x * 8) & 8, simplify the multiply. 
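// e.g., for (x * 8) & 8: TZ = 3 and the multiplier 8 supplies three
// trailing zero bits, so GCD = 3, DivAmt = 1, and ShiftedLHS becomes just
// x rather than the udiv (x * 8) /u 8 formed on the generic path below.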
6175 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6176 unsigned GCD = std::min(MulZeros, TZ); 6177 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6178 SmallVector<const SCEV*, 4> MulOps; 6179 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6180 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6181 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6182 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6183 } 6184 } 6185 if (!ShiftedLHS) 6186 ShiftedLHS = getUDivExpr(LHS, MulCount); 6187 return getMulExpr( 6188 getZeroExtendExpr( 6189 getTruncateExpr(ShiftedLHS, 6190 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6191 BO->LHS->getType()), 6192 MulCount); 6193 } 6194 } 6195 break; 6196 6197 case Instruction::Or: 6198 // If the RHS of the Or is a constant, we may have something like: 6199 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6200 // optimizations will transparently handle this case. 6201 // 6202 // In order for this transformation to be safe, the LHS must be of the 6203 // form X*(2^n) and the Or constant must be less than 2^n. 6204 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6205 const SCEV *LHS = getSCEV(BO->LHS); 6206 const APInt &CIVal = CI->getValue(); 6207 if (GetMinTrailingZeros(LHS) >= 6208 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6209 // Build a plain add SCEV. 6210 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6211 // If the LHS of the add was an addrec and it has no-wrap flags, 6212 // transfer the no-wrap flags, since an or won't introduce a wrap. 6213 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6214 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6215 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6216 OldAR->getNoWrapFlags()); 6217 } 6218 return S; 6219 } 6220 } 6221 break; 6222 6223 case Instruction::Xor: 6224 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6225 // If the RHS of xor is -1, then this is a not operation. 6226 if (CI->isMinusOne()) 6227 return getNotSCEV(getSCEV(BO->LHS)); 6228 6229 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6230 // This is a variant of the check for xor with -1, and it handles 6231 // the case where instcombine has trimmed non-demanded bits out 6232 // of an xor with -1. 6233 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6234 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6235 if (LBO->getOpcode() == Instruction::And && 6236 LCI->getValue() == CI->getValue()) 6237 if (const SCEVZeroExtendExpr *Z = 6238 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6239 Type *UTy = BO->LHS->getType(); 6240 const SCEV *Z0 = Z->getOperand(); 6241 Type *Z0Ty = Z0->getType(); 6242 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6243 6244 // If C is a low-bits mask, the zero extend is serving to 6245 // mask off the high bits. Complement the operand and 6246 // re-apply the zext. 6247 if (CI->getValue().isMask(Z0TySize)) 6248 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6249 6250 // If C is a single bit, it may be in the sign-bit position 6251 // before the zero-extend. In this case, represent the xor 6252 // using an add, which is equivalent, and re-apply the zext. 
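// e.g., with %z = zext i8 %x to i32 and C = 128: Trunc is the i8 sign
// mask, and xor(%z, 128) is modeled as zext(add(%x, 128) to i32), since
// adding 0x80 in i8 flips exactly the bit the xor flips.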
6253 APInt Trunc = CI->getValue().trunc(Z0TySize);
6254 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6255 Trunc.isSignMask())
6256 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6257 UTy);
6258 }
6259 }
6260 break;
6261
6262 case Instruction::Shl:
6263 // Turn shift left of a constant amount into a multiply.
6264 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6265 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6266
6267 // If the shift count is not less than the bitwidth, the result of
6268 // the shift is undefined. Don't try to analyze it, because the
6269 // resolution chosen here may differ from the resolution chosen in
6270 // other parts of the compiler.
6271 if (SA->getValue().uge(BitWidth))
6272 break;
6273
6274 // It is currently not resolved how to interpret NSW for left
6275 // shift by BitWidth - 1, so we avoid applying flags in that
6276 // case. Remove this check (or this comment) once the situation
6277 // is resolved. See
6278 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
6279 // and http://reviews.llvm.org/D8890 .
6280 auto Flags = SCEV::FlagAnyWrap;
6281 if (BO->Op && SA->getValue().ult(BitWidth - 1))
6282 Flags = getNoWrapFlagsFromUB(BO->Op);
6283
6284 Constant *X = ConstantInt::get(
6285 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6286 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6287 }
6288 break;
6289
6290 case Instruction::AShr: {
6291 // AShr X, C, where C is a constant.
6292 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6293 if (!CI)
6294 break;
6295
6296 Type *OuterTy = BO->LHS->getType();
6297 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6298 // If the shift count is not less than the bitwidth, the result of
6299 // the shift is undefined. Don't try to analyze it, because the
6300 // resolution chosen here may differ from the resolution chosen in
6301 // other parts of the compiler.
6302 if (CI->getValue().uge(BitWidth))
6303 break;
6304
6305 if (CI->isZero())
6306 return getSCEV(BO->LHS); // shift by zero --> noop
6307
6308 uint64_t AShrAmt = CI->getZExtValue();
6309 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6310
6311 Operator *L = dyn_cast<Operator>(BO->LHS);
6312 if (L && L->getOpcode() == Instruction::Shl) {
6313 // X = Shl A, n
6314 // Y = AShr X, m
6315 // Both n and m are constant.
6316
6317 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6318 if (L->getOperand(1) == BO->RHS)
6319 // For a two-shift sext-inreg, i.e. n = m,
6320 // use sext(trunc(x)) as the SCEV expression.
6321 return getSignExtendExpr(
6322 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6323
6324 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6325 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6326 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6327 if (ShlAmt > AShrAmt) {
6328 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6329 // expression. We already checked that ShlAmt < BitWidth, so
6330 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
6331 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
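// e.g., for i32 with n = 4 and m = 2: TruncTy is i30, the multiplier is
// 1 << (4 - 2) = 4, and (A << 4) ashr 2 becomes
// sext((trunc A to i30) * 4) to i32.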
6332 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6333 ShlAmt - AShrAmt);
6334 return getSignExtendExpr(
6335 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6336 getConstant(Mul)), OuterTy);
6337 }
6338 }
6339 }
6340 break;
6341 }
6342 }
6343 }
6344
6345 switch (U->getOpcode()) {
6346 case Instruction::Trunc:
6347 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6348
6349 case Instruction::ZExt:
6350 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6351
6352 case Instruction::SExt:
6353 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6354 // The NSW flag of a subtract does not always survive the conversion to
6355 // A + (-1)*B. By pushing sign extension onto its operands we are much
6356 // more likely to preserve NSW and allow later AddRec optimisations.
6357 //
6358 // NOTE: This is effectively duplicating the logic from getSignExtend:
6359 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6360 // but by that point the NSW information has potentially been lost.
6361 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6362 Type *Ty = U->getType();
6363 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6364 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6365 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6366 }
6367 }
6368 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6369
6370 case Instruction::BitCast:
6371 // BitCasts are no-op casts so we just eliminate the cast.
6372 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6373 return getSCEV(U->getOperand(0));
6374 break;
6375
6376 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
6377 // lead to pointer expressions which cannot safely be expanded to GEPs,
6378 // because ScalarEvolution doesn't respect the GEP aliasing rules when
6379 // simplifying integer expressions.
6380
6381 case Instruction::GetElementPtr:
6382 return createNodeForGEP(cast<GEPOperator>(U));
6383
6384 case Instruction::PHI:
6385 return createNodeForPHI(cast<PHINode>(U));
6386
6387 case Instruction::Select:
6388 // U can also be a select constant expr, which we let fall through. Since
6389 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
6390 // constant expressions cannot have instructions as operands, we'd have
6391 // returned getUnknown for a select constant expression anyway.
6392 if (isa<Instruction>(U))
6393 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6394 U->getOperand(1), U->getOperand(2));
6395 break;
6396
6397 case Instruction::Call:
6398 case Instruction::Invoke:
6399 if (Value *RV = CallSite(U).getReturnedArgOperand())
6400 return getSCEV(RV);
6401 break;
6402 }
6403
6404 return getUnknown(V);
6405 }
6406
6407 //===----------------------------------------------------------------------===//
6408 // Iteration Count Computation Code
6409 //
6410
6411 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6412 if (!ExitCount)
6413 return 0;
6414
6415 ConstantInt *ExitConst = ExitCount->getValue();
6416
6417 // Guard against huge trip counts.
6418 if (ExitConst->getValue().getActiveBits() > 32)
6419 return 0;
6420
6421 // In case of integer overflow, this returns 0, which is correct.
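// e.g., a backedge-taken count of UINT32_MAX wraps to a trip count of 0
// here, the same "unknown" answer returned for huge counts above.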
6422 return ((unsigned)ExitConst->getZExtValue()) + 1;
6423 }
6424
6425 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6426 if (BasicBlock *ExitingBB = L->getExitingBlock())
6427 return getSmallConstantTripCount(L, ExitingBB);
6428
6429 // No trip count information for multiple exits.
6430 return 0;
6431 }
6432
6433 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6434 BasicBlock *ExitingBlock) {
6435 assert(ExitingBlock && "Must pass a non-null exiting block!");
6436 assert(L->isLoopExiting(ExitingBlock) &&
6437 "Exiting block must actually branch out of the loop!");
6438 const SCEVConstant *ExitCount =
6439 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6440 return getConstantTripCount(ExitCount);
6441 }
6442
6443 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6444 const auto *MaxExitCount =
6445 dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
6446 return getConstantTripCount(MaxExitCount);
6447 }
6448
6449 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6450 if (BasicBlock *ExitingBB = L->getExitingBlock())
6451 return getSmallConstantTripMultiple(L, ExitingBB);
6452
6453 // No trip multiple information for multiple exits.
6454 return 0;
6455 }
6456
6457 /// Returns the largest constant divisor of the trip count of this loop as a
6458 /// normal unsigned value, if possible. This means that the actual trip count is
6459 /// always a multiple of the returned value (don't forget the trip count could
6460 /// very well be zero as well!).
6461 ///
6462 /// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
6463 /// of a constant (which is also the case if the trip count is simply
6464 /// constant; use getSmallConstantTripCount for that case). Will also return 1
6465 /// if the trip count is very large (>= 2^32).
6466 ///
6467 /// As explained in the comments for getSmallConstantTripCount, this assumes
6468 /// that control exits the loop via ExitingBlock.
6469 unsigned
6470 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6471 BasicBlock *ExitingBlock) {
6472 assert(ExitingBlock && "Must pass a non-null exiting block!");
6473 assert(L->isLoopExiting(ExitingBlock) &&
6474 "Exiting block must actually branch out of the loop!");
6475 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6476 if (ExitCount == getCouldNotCompute())
6477 return 1;
6478
6479 // Get the trip count from the BE count by adding 1.
6480 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6481
6482 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
6483 if (!TC)
6484 // Attempt to factor more general cases. Returns the greatest power of
6485 // two divisor. If overflow happens, the trip count expression is still
6486 // divisible by the greatest power of 2 divisor returned.
6487 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6488
6489 ConstantInt *Result = TC->getValue();
6490
6491 // Guard against huge trip counts (this requires checking
6492 // for zero to handle the case where the trip count == -1 and the
6493 // addition wraps).
6494 if (!Result || Result->getValue().getActiveBits() > 32 ||
6495 Result->getValue().getActiveBits() == 0)
6496 return 1;
6497
6498 return (unsigned)Result->getZExtValue();
6499 }
6500
6501 /// Get the expression for the number of loop iterations for which this loop is
6502 /// guaranteed not to exit via ExitingBlock. Otherwise return
6503 /// SCEVCouldNotCompute.
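/// For instance, when ExitingBlock is the only exiting block of a loop whose
/// count is computable, the result coincides with the loop's backedge-taken
/// count.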
6504 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
6505 BasicBlock *ExitingBlock) {
6506 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
6507 }
6508
6509 const SCEV *
6510 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
6511 SCEVUnionPredicate &Preds) {
6512 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
6513 }
6514
6515 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
6516 return getBackedgeTakenInfo(L).getExact(L, this);
6517 }
6518
6519 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is
6520 /// known never to be less than the actual backedge taken count.
6521 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
6522 return getBackedgeTakenInfo(L).getMax(this);
6523 }
6524
6525 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
6526 return getBackedgeTakenInfo(L).isMaxOrZero(this);
6527 }
6528
6529 /// Push PHI nodes in the header of the given loop onto the given Worklist.
6530 static void
6531 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
6532 BasicBlock *Header = L->getHeader();
6533
6534 // Push all Loop-header PHIs onto the Worklist stack.
6535 for (PHINode &PN : Header->phis())
6536 Worklist.push_back(&PN);
6537 }
6538
6539 const ScalarEvolution::BackedgeTakenInfo &
6540 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6541 auto &BTI = getBackedgeTakenInfo(L);
6542 if (BTI.hasFullInfo())
6543 return BTI;
6544
6545 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6546
6547 if (!Pair.second)
6548 return Pair.first->second;
6549
6550 BackedgeTakenInfo Result =
6551 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6552
6553 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6554 }
6555
6556 const ScalarEvolution::BackedgeTakenInfo &
6557 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6558 // Initially insert an invalid entry for this loop. If the insertion
6559 // succeeds, proceed to actually compute a backedge-taken count and
6560 // update the value. The temporary CouldNotCompute value tells SCEV
6561 // code elsewhere that it shouldn't attempt to request a new
6562 // backedge-taken count, which could result in infinite recursion.
6563 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6564 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6565 if (!Pair.second)
6566 return Pair.first->second;
6567
6568 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6569 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6570 // must be cleared in this scope.
6571 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6572
6573 // In builds without statistics, these counters are otherwise unused.
6574 (void)NumTripCountsComputed;
6575 (void)NumTripCountsNotComputed;
6576 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6577 const SCEV *BEExact = Result.getExact(L, this);
6578 if (BEExact != getCouldNotCompute()) {
6579 assert(isLoopInvariant(BEExact, L) &&
6580 isLoopInvariant(Result.getMax(this), L) &&
6581 "Computed backedge-taken count isn't loop invariant for loop!");
6582 ++NumTripCountsComputed;
6583 }
6584 else if (Result.getMax(this) == getCouldNotCompute() &&
6585 isa<PHINode>(L->getHeader()->begin())) {
6586 // Only count loops that have phi nodes as not being computable.
6587 ++NumTripCountsNotComputed;
6588 }
6589 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6590
6591 // Now that we know more about the trip count for this loop, forget any
6592 // existing SCEV values for PHI nodes in this loop since they are only
6593 // conservative estimates made without the benefit of trip count
6594 // information. This is similar to the code in forgetLoop, except that
6595 // it handles SCEVUnknown PHI nodes specially.
6596 if (Result.hasAnyInfo()) {
6597 SmallVector<Instruction *, 16> Worklist;
6598 PushLoopPHIs(L, Worklist);
6599
6600 SmallPtrSet<Instruction *, 8> Discovered;
6601 while (!Worklist.empty()) {
6602 Instruction *I = Worklist.pop_back_val();
6603
6604 ValueExprMapType::iterator It =
6605 ValueExprMap.find_as(static_cast<Value *>(I));
6606 if (It != ValueExprMap.end()) {
6607 const SCEV *Old = It->second;
6608
6609 // SCEVUnknown for a PHI either means that it has an unrecognized
6610 // structure, or it's a PHI that's in the process of being computed
6611 // by createNodeForPHI. In the former case, additional loop trip
6612 // count information isn't going to change anything. In the latter
6613 // case, createNodeForPHI will perform the necessary updates on its
6614 // own when it gets to that point.
6615 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6616 eraseValueFromMap(It->first);
6617 forgetMemoizedResults(Old);
6618 }
6619 if (PHINode *PN = dyn_cast<PHINode>(I))
6620 ConstantEvolutionLoopExitValue.erase(PN);
6621 }
6622
6623 // Since we don't need to invalidate anything for correctness and we're
6624 // only invalidating to make SCEV's results more precise, we get to stop
6625 // early to avoid invalidating too much. This is especially important in
6626 // cases like:
6627 //
6628 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6629 // loop0:
6630 // %pn0 = phi
6631 // ...
6632 // loop1:
6633 // %pn1 = phi
6634 // ...
6635 //
6636 // where both loop0's and loop1's backedge-taken counts use the SCEV
6637 // expression for %v. If we don't have the early stop below then in cases
6638 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6639 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6640 // count for loop1, effectively nullifying SCEV's trip count cache.
6641 for (auto *U : I->users())
6642 if (auto *I = dyn_cast<Instruction>(U)) {
6643 auto *LoopForUser = LI.getLoopFor(I->getParent());
6644 if (LoopForUser && L->contains(LoopForUser) &&
6645 Discovered.insert(I).second)
6646 Worklist.push_back(I);
6647 }
6648 }
6649 }
6650
6651 // Re-lookup the insert position, since the call to
6652 // computeBackedgeTakenCount above could result in a
6653 // recursive call to getBackedgeTakenInfo (on a different
6654 // loop), which would invalidate the iterator computed
6655 // earlier.
6656 return BackedgeTakenCounts.find(L)->second = std::move(Result);
6657 }
6658
6659 void ScalarEvolution::forgetLoop(const Loop *L) {
6660 // Drop any stored trip count value.
6661 auto RemoveLoopFromBackedgeMap =
6662 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
6663 auto BTCPos = Map.find(L);
6664 if (BTCPos != Map.end()) {
6665 BTCPos->second.clear();
6666 Map.erase(BTCPos);
6667 }
6668 };
6669
6670 SmallVector<const Loop *, 16> LoopWorklist(1, L);
6671 SmallVector<Instruction *, 32> Worklist;
6672 SmallPtrSet<Instruction *, 16> Visited;
6673
6674 // Iterate over all the loops and sub-loops to drop SCEV information.
6675 while (!LoopWorklist.empty()) { 6676 auto *CurrL = LoopWorklist.pop_back_val(); 6677 6678 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6679 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6680 6681 // Drop information about predicated SCEV rewrites for this loop. 6682 for (auto I = PredicatedSCEVRewrites.begin(); 6683 I != PredicatedSCEVRewrites.end();) { 6684 std::pair<const SCEV *, const Loop *> Entry = I->first; 6685 if (Entry.second == CurrL) 6686 PredicatedSCEVRewrites.erase(I++); 6687 else 6688 ++I; 6689 } 6690 6691 auto LoopUsersItr = LoopUsers.find(CurrL); 6692 if (LoopUsersItr != LoopUsers.end()) { 6693 for (auto *S : LoopUsersItr->second) 6694 forgetMemoizedResults(S); 6695 LoopUsers.erase(LoopUsersItr); 6696 } 6697 6698 // Drop information about expressions based on loop-header PHIs. 6699 PushLoopPHIs(CurrL, Worklist); 6700 6701 while (!Worklist.empty()) { 6702 Instruction *I = Worklist.pop_back_val(); 6703 if (!Visited.insert(I).second) 6704 continue; 6705 6706 ValueExprMapType::iterator It = 6707 ValueExprMap.find_as(static_cast<Value *>(I)); 6708 if (It != ValueExprMap.end()) { 6709 eraseValueFromMap(It->first); 6710 forgetMemoizedResults(It->second); 6711 if (PHINode *PN = dyn_cast<PHINode>(I)) 6712 ConstantEvolutionLoopExitValue.erase(PN); 6713 } 6714 6715 PushDefUseChildren(I, Worklist); 6716 } 6717 6718 LoopPropertiesCache.erase(CurrL); 6719 // Forget all contained loops too, to avoid dangling entries in the 6720 // ValuesAtScopes map. 6721 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6722 } 6723 } 6724 6725 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6726 while (Loop *Parent = L->getParentLoop()) 6727 L = Parent; 6728 forgetLoop(L); 6729 } 6730 6731 void ScalarEvolution::forgetValue(Value *V) { 6732 Instruction *I = dyn_cast<Instruction>(V); 6733 if (!I) return; 6734 6735 // Drop information about expressions based on loop-header PHIs. 6736 SmallVector<Instruction *, 16> Worklist; 6737 Worklist.push_back(I); 6738 6739 SmallPtrSet<Instruction *, 8> Visited; 6740 while (!Worklist.empty()) { 6741 I = Worklist.pop_back_val(); 6742 if (!Visited.insert(I).second) 6743 continue; 6744 6745 ValueExprMapType::iterator It = 6746 ValueExprMap.find_as(static_cast<Value *>(I)); 6747 if (It != ValueExprMap.end()) { 6748 eraseValueFromMap(It->first); 6749 forgetMemoizedResults(It->second); 6750 if (PHINode *PN = dyn_cast<PHINode>(I)) 6751 ConstantEvolutionLoopExitValue.erase(PN); 6752 } 6753 6754 PushDefUseChildren(I, Worklist); 6755 } 6756 } 6757 6758 /// Get the exact loop backedge taken count considering all loop exits. A 6759 /// computable result can only be returned for loops with all exiting blocks 6760 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6761 /// is never skipped. This is a valid assumption as long as the loop exits via 6762 /// that test. For precise results, it is the caller's responsibility to specify 6763 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6764 const SCEV * 6765 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6766 SCEVUnionPredicate *Preds) const { 6767 // If any exits were not computable, the loop is not computable. 6768 if (!isComplete() || ExitNotTaken.empty()) 6769 return SE->getCouldNotCompute(); 6770 6771 const BasicBlock *Latch = L->getLoopLatch(); 6772 // All exiting blocks we have collected must dominate the only backedge. 
6773 if (!Latch) 6774 return SE->getCouldNotCompute(); 6775 6776 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6777 // count is simply a minimum out of all these calculated exit counts. 6778 SmallVector<const SCEV *, 2> Ops; 6779 for (auto &ENT : ExitNotTaken) { 6780 const SCEV *BECount = ENT.ExactNotTaken; 6781 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6782 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6783 "We should only have known counts for exiting blocks that dominate " 6784 "latch!"); 6785 6786 Ops.push_back(BECount); 6787 6788 if (Preds && !ENT.hasAlwaysTruePredicate()) 6789 Preds->add(ENT.Predicate.get()); 6790 6791 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6792 "Predicate should be always true!"); 6793 } 6794 6795 return SE->getUMinFromMismatchedTypes(Ops); 6796 } 6797 6798 /// Get the exact not taken count for this loop exit. 6799 const SCEV * 6800 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6801 ScalarEvolution *SE) const { 6802 for (auto &ENT : ExitNotTaken) 6803 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6804 return ENT.ExactNotTaken; 6805 6806 return SE->getCouldNotCompute(); 6807 } 6808 6809 /// getMax - Get the max backedge taken count for the loop. 6810 const SCEV * 6811 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6812 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6813 return !ENT.hasAlwaysTruePredicate(); 6814 }; 6815 6816 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6817 return SE->getCouldNotCompute(); 6818 6819 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6820 "No point in having a non-constant max backedge taken count!"); 6821 return getMax(); 6822 } 6823 6824 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6825 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6826 return !ENT.hasAlwaysTruePredicate(); 6827 }; 6828 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6829 } 6830 6831 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6832 ScalarEvolution *SE) const { 6833 if (getMax() && getMax() != SE->getCouldNotCompute() && 6834 SE->hasOperand(getMax(), S)) 6835 return true; 6836 6837 for (auto &ENT : ExitNotTaken) 6838 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6839 SE->hasOperand(ENT.ExactNotTaken, S)) 6840 return true; 6841 6842 return false; 6843 } 6844 6845 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6846 : ExactNotTaken(E), MaxNotTaken(E) { 6847 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6848 isa<SCEVConstant>(MaxNotTaken)) && 6849 "No point in having a non-constant max backedge taken count!"); 6850 } 6851 6852 ScalarEvolution::ExitLimit::ExitLimit( 6853 const SCEV *E, const SCEV *M, bool MaxOrZero, 6854 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6855 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6856 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6857 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6858 "Exact is not allowed to be less precise than Max"); 6859 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6860 isa<SCEVConstant>(MaxNotTaken)) && 6861 "No point in having a non-constant max backedge taken count!"); 6862 for (auto *PredSet : PredSetList) 6863 for (auto *P : *PredSet) 6864 addPredicate(P); 6865 } 6866 6867 ScalarEvolution::ExitLimit::ExitLimit( 6868 const SCEV *E, const SCEV *M, bool 
MaxOrZero, 6869 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6870 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6871 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6872 isa<SCEVConstant>(MaxNotTaken)) && 6873 "No point in having a non-constant max backedge taken count!"); 6874 } 6875 6876 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6877 bool MaxOrZero) 6878 : ExitLimit(E, M, MaxOrZero, None) { 6879 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6880 isa<SCEVConstant>(MaxNotTaken)) && 6881 "No point in having a non-constant max backedge taken count!"); 6882 } 6883 6884 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6885 /// computable exit into a persistent ExitNotTakenInfo array. 6886 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6887 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6888 &&ExitCounts, 6889 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6890 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6891 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6892 6893 ExitNotTaken.reserve(ExitCounts.size()); 6894 std::transform( 6895 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6896 [&](const EdgeExitInfo &EEI) { 6897 BasicBlock *ExitBB = EEI.first; 6898 const ExitLimit &EL = EEI.second; 6899 if (EL.Predicates.empty()) 6900 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6901 6902 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6903 for (auto *Pred : EL.Predicates) 6904 Predicate->add(Pred); 6905 6906 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6907 }); 6908 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6909 "No point in having a non-constant max backedge taken count!"); 6910 } 6911 6912 /// Invalidate this result and free the ExitNotTakenInfo array. 6913 void ScalarEvolution::BackedgeTakenInfo::clear() { 6914 ExitNotTaken.clear(); 6915 } 6916 6917 /// Compute the number of times the backedge of the specified loop will execute. 6918 ScalarEvolution::BackedgeTakenInfo 6919 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 6920 bool AllowPredicates) { 6921 SmallVector<BasicBlock *, 8> ExitingBlocks; 6922 L->getExitingBlocks(ExitingBlocks); 6923 6924 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6925 6926 SmallVector<EdgeExitInfo, 4> ExitCounts; 6927 bool CouldComputeBECount = true; 6928 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 6929 const SCEV *MustExitMaxBECount = nullptr; 6930 const SCEV *MayExitMaxBECount = nullptr; 6931 bool MustExitMaxOrZero = false; 6932 6933 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 6934 // and compute maxBECount. 6935 // Do a union of all the predicates here. 6936 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 6937 BasicBlock *ExitBB = ExitingBlocks[i]; 6938 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 6939 6940 assert((AllowPredicates || EL.Predicates.empty()) && 6941 "Predicated exit limit when predicates are not allowed!"); 6942 6943 // 1. For each exit that can be computed, add an entry to ExitCounts. 6944 // CouldComputeBECount is true only if all exits can be computed. 6945 if (EL.ExactNotTaken == getCouldNotCompute()) 6946 // We couldn't compute an exact value for this exit, so 6947 // we won't be able to compute an exact value for the loop. 
6948 CouldComputeBECount = false; 6949 else 6950 ExitCounts.emplace_back(ExitBB, EL); 6951 6952 // 2. Derive the loop's MaxBECount from each exit's max number of 6953 // non-exiting iterations. Partition the loop exits into two kinds: 6954 // LoopMustExits and LoopMayExits. 6955 // 6956 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 6957 // is a LoopMayExit. If any computable LoopMustExit is found, then 6958 // MaxBECount is the minimum EL.MaxNotTaken of computable 6959 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 6960 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 6961 // computable EL.MaxNotTaken. 6962 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 6963 DT.dominates(ExitBB, Latch)) { 6964 if (!MustExitMaxBECount) { 6965 MustExitMaxBECount = EL.MaxNotTaken; 6966 MustExitMaxOrZero = EL.MaxOrZero; 6967 } else { 6968 MustExitMaxBECount = 6969 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 6970 } 6971 } else if (MayExitMaxBECount != getCouldNotCompute()) { 6972 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 6973 MayExitMaxBECount = EL.MaxNotTaken; 6974 else { 6975 MayExitMaxBECount = 6976 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 6977 } 6978 } 6979 } 6980 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 6981 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 6982 // The loop backedge will be taken the maximum or zero times if there's 6983 // a single exit that must be taken the maximum or zero times. 6984 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 6985 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 6986 MaxBECount, MaxOrZero); 6987 } 6988 6989 ScalarEvolution::ExitLimit 6990 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 6991 bool AllowPredicates) { 6992 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 6993 // If our exiting block does not dominate the latch, then its connection with 6994 // loop's exit limit may be far from trivial. 6995 const BasicBlock *Latch = L->getLoopLatch(); 6996 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 6997 return getCouldNotCompute(); 6998 6999 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7000 TerminatorInst *Term = ExitingBlock->getTerminator(); 7001 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7002 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7003 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7004 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7005 "It should have one successor in loop and one exit block!"); 7006 // Proceed to the next level to examine the exit condition expression. 7007 return computeExitLimitFromCond( 7008 L, BI->getCondition(), ExitIfTrue, 7009 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7010 } 7011 7012 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7013 // For switch, make sure that there is a single exit from the loop. 7014 BasicBlock *Exit = nullptr; 7015 for (auto *SBB : successors(ExitingBlock)) 7016 if (!L->contains(SBB)) { 7017 if (Exit) // Multiple exit successors. 
7018 return getCouldNotCompute(); 7019 Exit = SBB; 7020 } 7021 assert(Exit && "Exiting block must have at least one exit"); 7022 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7023 /*ControlsExit=*/IsOnlyExit); 7024 } 7025 7026 return getCouldNotCompute(); 7027 } 7028 7029 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7030 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7031 bool ControlsExit, bool AllowPredicates) { 7032 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7033 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7034 ControlsExit, AllowPredicates); 7035 } 7036 7037 Optional<ScalarEvolution::ExitLimit> 7038 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7039 bool ExitIfTrue, bool ControlsExit, 7040 bool AllowPredicates) { 7041 (void)this->L; 7042 (void)this->ExitIfTrue; 7043 (void)this->AllowPredicates; 7044 7045 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7046 this->AllowPredicates == AllowPredicates && 7047 "Variance in assumed invariant key components!"); 7048 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7049 if (Itr == TripCountMap.end()) 7050 return None; 7051 return Itr->second; 7052 } 7053 7054 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7055 bool ExitIfTrue, 7056 bool ControlsExit, 7057 bool AllowPredicates, 7058 const ExitLimit &EL) { 7059 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7060 this->AllowPredicates == AllowPredicates && 7061 "Variance in assumed invariant key components!"); 7062 7063 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7064 assert(InsertResult.second && "Expected successful insertion!"); 7065 (void)InsertResult; 7066 (void)ExitIfTrue; 7067 } 7068 7069 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7070 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7071 bool ControlsExit, bool AllowPredicates) { 7072 7073 if (auto MaybeEL = 7074 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7075 return *MaybeEL; 7076 7077 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7078 ControlsExit, AllowPredicates); 7079 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7080 return EL; 7081 } 7082 7083 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7084 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7085 bool ControlsExit, bool AllowPredicates) { 7086 // Check if the controlling expression for this loop is an And or Or. 7087 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7088 if (BO->getOpcode() == Instruction::And) { 7089 // Recurse on the operands of the and. 7090 bool EitherMayExit = !ExitIfTrue; 7091 ExitLimit EL0 = computeExitLimitFromCondCached( 7092 Cache, L, BO->getOperand(0), ExitIfTrue, 7093 ControlsExit && !EitherMayExit, AllowPredicates); 7094 ExitLimit EL1 = computeExitLimitFromCondCached( 7095 Cache, L, BO->getOperand(1), ExitIfTrue, 7096 ControlsExit && !EitherMayExit, AllowPredicates); 7097 const SCEV *BECount = getCouldNotCompute(); 7098 const SCEV *MaxBECount = getCouldNotCompute(); 7099 if (EitherMayExit) { 7100 // Both conditions must be true for the loop to continue executing. 7101 // Choose the less conservative count. 
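// e.g., for a latch condition (A != X) & (B != Y) that must stay true for
// the backedge to be taken, the loop exits as soon as either test fails,
// so the backedge-taken count is the umin of the two individual counts.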
7102 if (EL0.ExactNotTaken == getCouldNotCompute() || 7103 EL1.ExactNotTaken == getCouldNotCompute()) 7104 BECount = getCouldNotCompute(); 7105 else 7106 BECount = 7107 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7108 if (EL0.MaxNotTaken == getCouldNotCompute()) 7109 MaxBECount = EL1.MaxNotTaken; 7110 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7111 MaxBECount = EL0.MaxNotTaken; 7112 else 7113 MaxBECount = 7114 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7115 } else { 7116 // Both conditions must be true at the same time for the loop to exit. 7117 // For now, be conservative. 7118 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7119 MaxBECount = EL0.MaxNotTaken; 7120 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7121 BECount = EL0.ExactNotTaken; 7122 } 7123 7124 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7125 // to be more aggressive when computing BECount than when computing 7126 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7127 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7128 // to not. 7129 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7130 !isa<SCEVCouldNotCompute>(BECount)) 7131 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7132 7133 return ExitLimit(BECount, MaxBECount, false, 7134 {&EL0.Predicates, &EL1.Predicates}); 7135 } 7136 if (BO->getOpcode() == Instruction::Or) { 7137 // Recurse on the operands of the or. 7138 bool EitherMayExit = ExitIfTrue; 7139 ExitLimit EL0 = computeExitLimitFromCondCached( 7140 Cache, L, BO->getOperand(0), ExitIfTrue, 7141 ControlsExit && !EitherMayExit, AllowPredicates); 7142 ExitLimit EL1 = computeExitLimitFromCondCached( 7143 Cache, L, BO->getOperand(1), ExitIfTrue, 7144 ControlsExit && !EitherMayExit, AllowPredicates); 7145 const SCEV *BECount = getCouldNotCompute(); 7146 const SCEV *MaxBECount = getCouldNotCompute(); 7147 if (EitherMayExit) { 7148 // Both conditions must be false for the loop to continue executing. 7149 // Choose the less conservative count. 7150 if (EL0.ExactNotTaken == getCouldNotCompute() || 7151 EL1.ExactNotTaken == getCouldNotCompute()) 7152 BECount = getCouldNotCompute(); 7153 else 7154 BECount = 7155 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7156 if (EL0.MaxNotTaken == getCouldNotCompute()) 7157 MaxBECount = EL1.MaxNotTaken; 7158 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7159 MaxBECount = EL0.MaxNotTaken; 7160 else 7161 MaxBECount = 7162 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7163 } else { 7164 // Both conditions must be false at the same time for the loop to exit. 7165 // For now, be conservative. 7166 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7167 MaxBECount = EL0.MaxNotTaken; 7168 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7169 BECount = EL0.ExactNotTaken; 7170 } 7171 7172 return ExitLimit(BECount, MaxBECount, false, 7173 {&EL0.Predicates, &EL1.Predicates}); 7174 } 7175 } 7176 7177 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7178 // Proceed to the next level to examine the icmp. 7179 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7180 ExitLimit EL = 7181 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7182 if (EL.hasFullInfo() || !AllowPredicates) 7183 return EL; 7184 7185 // Try again, but use SCEV predicates this time. 
7186 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7187 /*AllowPredicates=*/true); 7188 } 7189 7190 // Check for a constant condition. These are normally stripped out by 7191 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7192 // preserve the CFG and is temporarily leaving constant conditions 7193 // in place. 7194 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7195 if (ExitIfTrue == !CI->getZExtValue()) 7196 // The backedge is always taken. 7197 return getCouldNotCompute(); 7198 else 7199 // The backedge is never taken. 7200 return getZero(CI->getType()); 7201 } 7202 7203 // If it's not an integer or pointer comparison then compute it the hard way. 7204 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7205 } 7206 7207 ScalarEvolution::ExitLimit 7208 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7209 ICmpInst *ExitCond, 7210 bool ExitIfTrue, 7211 bool ControlsExit, 7212 bool AllowPredicates) { 7213 // If the condition was exit on true, convert the condition to exit on false 7214 ICmpInst::Predicate Pred; 7215 if (!ExitIfTrue) 7216 Pred = ExitCond->getPredicate(); 7217 else 7218 Pred = ExitCond->getInversePredicate(); 7219 const ICmpInst::Predicate OriginalPred = Pred; 7220 7221 // Handle common loops like: for (X = "string"; *X; ++X) 7222 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7223 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7224 ExitLimit ItCnt = 7225 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7226 if (ItCnt.hasAnyInfo()) 7227 return ItCnt; 7228 } 7229 7230 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7231 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7232 7233 // Try to evaluate any dependencies out of the loop. 7234 LHS = getSCEVAtScope(LHS, L); 7235 RHS = getSCEVAtScope(RHS, L); 7236 7237 // At this point, we would like to compute how many iterations of the 7238 // loop the predicate will return true for these inputs. 7239 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7240 // If there is a loop-invariant, force it into the RHS. 7241 std::swap(LHS, RHS); 7242 Pred = ICmpInst::getSwappedPredicate(Pred); 7243 } 7244 7245 // Simplify the operands before analyzing them. 7246 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7247 7248 // If we have a comparison of a chrec against a constant, try to use value 7249 // ranges to answer this query. 7250 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7251 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7252 if (AddRec->getLoop() == L) { 7253 // Form the constant range. 
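// e.g., with Pred = ICMP_SLT and RHS = 100, the region is [SINT_MIN, 100);
// the add recurrence is then asked how many iterations produce a value
// still inside that range.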
7254 ConstantRange CompRange = 7255 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7256 7257 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7258 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7259 } 7260 7261 switch (Pred) { 7262 case ICmpInst::ICMP_NE: { // while (X != Y) 7263 // Convert to: while (X-Y != 0) 7264 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7265 AllowPredicates); 7266 if (EL.hasAnyInfo()) return EL; 7267 break; 7268 } 7269 case ICmpInst::ICMP_EQ: { // while (X == Y) 7270 // Convert to: while (X-Y == 0) 7271 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7272 if (EL.hasAnyInfo()) return EL; 7273 break; 7274 } 7275 case ICmpInst::ICMP_SLT: 7276 case ICmpInst::ICMP_ULT: { // while (X < Y) 7277 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7278 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7279 AllowPredicates); 7280 if (EL.hasAnyInfo()) return EL; 7281 break; 7282 } 7283 case ICmpInst::ICMP_SGT: 7284 case ICmpInst::ICMP_UGT: { // while (X > Y) 7285 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7286 ExitLimit EL = 7287 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7288 AllowPredicates); 7289 if (EL.hasAnyInfo()) return EL; 7290 break; 7291 } 7292 default: 7293 break; 7294 } 7295 7296 auto *ExhaustiveCount = 7297 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7298 7299 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7300 return ExhaustiveCount; 7301 7302 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7303 ExitCond->getOperand(1), L, OriginalPred); 7304 } 7305 7306 ScalarEvolution::ExitLimit 7307 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7308 SwitchInst *Switch, 7309 BasicBlock *ExitingBlock, 7310 bool ControlsExit) { 7311 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7312 7313 // Give up if the exit is the default dest of a switch. 7314 if (Switch->getDefaultDest() == ExitingBlock) 7315 return getCouldNotCompute(); 7316 7317 assert(L->contains(Switch->getDefaultDest()) && 7318 "Default case must not exit the loop!"); 7319 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7320 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7321 7322 // while (X != Y) --> while (X-Y != 0) 7323 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7324 if (EL.hasAnyInfo()) 7325 return EL; 7326 7327 return getCouldNotCompute(); 7328 } 7329 7330 static ConstantInt * 7331 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7332 ScalarEvolution &SE) { 7333 const SCEV *InVal = SE.getConstant(C); 7334 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7335 assert(isa<SCEVConstant>(Val) && 7336 "Evaluation of SCEV at constant didn't fold correctly?"); 7337 return cast<SCEVConstant>(Val)->getValue(); 7338 } 7339 7340 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7341 /// compute the backedge execution count. 7342 ScalarEvolution::ExitLimit 7343 ScalarEvolution::computeLoadConstantCompareExitLimit( 7344 LoadInst *LI, 7345 Constant *RHS, 7346 const Loop *L, 7347 ICmpInst::Predicate predicate) { 7348 if (LI->isVolatile()) return getCouldNotCompute(); 7349 7350 // Check to see if the loaded pointer is a getelementptr of a global. 7351 // TODO: Use SCEV instead of manually grubbing with GEPs. 
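// The shape handled below is a loop like
//   for (i = 0; table[i] != 0; ++i) ...
// where table is a constant global array and the load's GEP has a single
// loop-variant index.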
7352 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7353 if (!GEP) return getCouldNotCompute();
7354
7355 // Make sure that it is really a constant global we are gepping, with an
7356 // initializer, and make sure the first IDX is really 0.
7357 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7358 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7359 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7360 !cast<Constant>(GEP->getOperand(1))->isNullValue())
7361 return getCouldNotCompute();
7362
7363 // Okay, we allow one non-constant index into the GEP instruction.
7364 Value *VarIdx = nullptr;
7365 std::vector<Constant*> Indexes;
7366 unsigned VarIdxNum = 0;
7367 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7368 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7369 Indexes.push_back(CI);
7370 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7371 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7372 VarIdx = GEP->getOperand(i);
7373 VarIdxNum = i-2;
7374 Indexes.push_back(nullptr);
7375 }
7376
7377 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7378 if (!VarIdx)
7379 return getCouldNotCompute();
7380
7381 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7382 // Check to see if X is a loop variant variable value now.
7383 const SCEV *Idx = getSCEV(VarIdx);
7384 Idx = getSCEVAtScope(Idx, L);
7385
7386 // We can only recognize very limited forms of loop index expressions, in
7387 // particular, only affine AddRec's like {C1,+,C2}.
7388 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7389 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7390 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7391 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7392 return getCouldNotCompute();
7393
7394 unsigned MaxSteps = MaxBruteForceIterations;
7395 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7396 ConstantInt *ItCst = ConstantInt::get(
7397 cast<IntegerType>(IdxExpr->getType()), IterationNum);
7398 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7399
7400 // Form the GEP offset.
7401 Indexes[VarIdxNum] = Val;
7402
7403 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7404 Indexes);
7405 if (!Result) break; // Cannot compute!
7406
7407 // Evaluate the condition for this iteration.
7408 Result = ConstantExpr::getICmp(predicate, Result, RHS);
7409 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
7410 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7411 ++NumArrayLenItCounts;
7412 return getConstant(ItCst); // Found terminating iteration!
7413 }
7414 }
7415 return getCouldNotCompute();
7416 }
7417
7418 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7419 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7420 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7421 if (!RHS)
7422 return getCouldNotCompute();
7423
7424 const BasicBlock *Latch = L->getLoopLatch();
7425 if (!Latch)
7426 return getCouldNotCompute();
7427
7428 const BasicBlock *Predecessor = L->getLoopPredecessor();
7429 if (!Predecessor)
7430 return getCouldNotCompute();
7431
7432 // Return true if V is of the form "LHS `shift_op` <positive constant>".
7433 // Return LHS in OutLHS and shift_op in OutOpCode.
7434 auto MatchPositiveShift =
7435 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7436
7437 using namespace PatternMatch;
7438
7439 ConstantInt *ShiftAmt;
7440 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7441 OutOpCode = Instruction::LShr;
7442 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7443 OutOpCode = Instruction::AShr;
7444 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7445 OutOpCode = Instruction::Shl;
7446 else
7447 return false;
7448
7449 return ShiftAmt->getValue().isStrictlyPositive();
7450 };
7451
7452 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
7453 //
7454 // loop:
7455 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7456 // %iv.shifted = lshr i32 %iv, <positive constant>
7457 //
7458 // Return true on a successful match. Return the corresponding PHI node (%iv
7459 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7460 auto MatchShiftRecurrence =
7461 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7462 Optional<Instruction::BinaryOps> PostShiftOpCode;
7463
7464 {
7465 Instruction::BinaryOps OpC;
7466 Value *V;
7467
7468 // If we encounter a shift instruction, "peel off" the shift operation,
7469 // and remember that we did so. Later when we inspect %iv's backedge
7470 // value, we will make sure that the backedge value uses the same
7471 // operation.
7472 //
7473 // Note: the peeled shift operation does not have to be the same
7474 // instruction as the one feeding into the PHI's backedge value. We only
7475 // really care about it being the same *kind* of shift instruction --
7476 // that's all that is required for our later inferences to hold.
7477 if (MatchPositiveShift(LHS, V, OpC)) {
7478 PostShiftOpCode = OpC;
7479 LHS = V;
7480 }
7481 }
7482
7483 PNOut = dyn_cast<PHINode>(LHS);
7484 if (!PNOut || PNOut->getParent() != L->getHeader())
7485 return false;
7486
7487 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7488 Value *OpLHS;
7489
7490 return
7491 // The backedge value for the PHI node must be a shift by a positive
7492 // amount
7493 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7494
7495 // of the PHI node itself
7496 OpLHS == PNOut &&
7497
7498 // and the kind of shift should match the kind of shift we peeled
7499 // off, if any.
7500 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7501 };
7502
7503 PHINode *PN;
7504 Instruction::BinaryOps OpCode;
7505 if (!MatchShiftRecurrence(LHS, PN, OpCode))
7506 return getCouldNotCompute();
7507
7508 const DataLayout &DL = getDataLayout();
7509
7510 // The key rationale for this optimization is that for some kinds of shift
7511 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7512 // within a finite number of iterations. If the condition guarding the
7513 // backedge (in the sense that the backedge is taken if the condition is true)
7514 // is false for the value the shift recurrence stabilizes to, then we know
7515 // that the backedge is taken only a finite number of times.
7516
7517 ConstantInt *StableValue = nullptr;
7518 switch (OpCode) {
7519 default:
7520 llvm_unreachable("Impossible case!");
7521
7522 case Instruction::AShr: {
7523 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
7524 // bitwidth(K) iterations.
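// e.g., {-1024,ashr,1} goes -1024, -512, ..., -1 and then stays at -1,
// while {1024,ashr,1} reaches 0 after eleven iterations and stays there.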
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
/// nodes in the loop have the constant values given in Vals.  If we can't
/// fold this expression for some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, that the loop executes a constant
/// number of times, and that the PHI node is just a recurrence involving
/// constants, try to fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;  // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {  // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;  // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.  We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;  // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
  case scCouldNotCompute:
  case scAddRecExpr:
    break;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    break;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    break;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    break;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
      if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
        unsigned AS = PTy->getAddressSpace();
        Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
        C = ConstantExpr::getBitCast(C, DestPtrTy);
      }
      for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
        if (!C2) return nullptr;

        // First pointer!
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes.  We can add bytes to an
          // i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers. We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    break;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy()) return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy()) return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    break;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    break;
  }
  case scSMaxExpr:
  case scUMaxExpr:
    break; // TODO: smax, umax.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = this->LI[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                    dyn_cast<SCEVConstant>(BackedgeTakenCount)) {

              // This trivial case can show up in some degenerate cases where
              // the incoming IR has not yet been fully simplified.
              if (BTCC->getValue()->isZero()) {
                Value *InitValue = nullptr;
                bool MultipleInitValues = false;
                for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                  if (!LI->contains(PN->getIncomingBlock(i))) {
                    if (!InitValue)
                      InitValue = PN->getIncomingValue(i);
                    else if (InitValue != PN->getIncomingValue(i)) {
                      MultipleInitValues = true;
                      break;
                    }
                  }
                }
                // All incoming values from outside the loop must agree before
                // InitValue can be used as the exit value.
                if (!MultipleInitValues && InitValue)
                  return getSCEV(InitValue);
              }
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV =
                  getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.
      // This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any operand is non-constant and of a type SCEV cannot model
          // (i.e. neither integer nor pointer), don't even try to analyze it.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;  // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded addrec expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
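///
/// A worked instance (illustrative values, chosen for this comment): with
/// BW = 4 (N = 16), A = 6 and B = 10, we get D = gcd(6, 16) = 2, which
/// divides 10.  The multiplicative inverse of A/D = 3 modulo N/D = 8 is 3
/// (3 * 3 = 9 = 1 (mod 8)), so the minimum root is
/// (3 * 10 mod 16) / 2 = 14 / 2 = 7; indeed 6 * 7 = 42 = 10 (mod 16).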
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for
  // B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //
  //     I * (B / D) mod (N / D)
  //
  // To simplify the computation, we factor out the divide by D:
  //
  //     (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// Find the roots of the quadratic equation for the given quadratic chrec
/// {L,+,M,+,N}. This returns either the two roots (which might be the same),
/// or None if the roots cannot be computed (for example because the
/// coefficients are not all constant or the discriminant is negative).
static Optional<std::pair<const SCEVConstant *,const SCEVConstant *>>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC)
    return None;

  uint32_t BitWidth = LC->getAPInt().getBitWidth();
  const APInt &L = LC->getAPInt();
  const APInt &M = MC->getAPInt();
  const APInt &N = NC->getAPInt();
  APInt Two(BitWidth, 2);

  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
  // The value of the chrec at iteration X is L + M*X + N*X*(X-1)/2, which
  // rearranges to (N/2)*X^2 + (M - N/2)*X + L.

  // The A coefficient is N/2
  APInt A = N.sdiv(Two);

  // The B coefficient is M-N/2
  APInt B = M;
  B -= A;  // A is the same as N/2.

  // The C coefficient is L.
  const APInt& C = L;

  // Compute the B^2-4ac term.
  APInt SqrtTerm = B;
  SqrtTerm *= B;
  SqrtTerm -= 4 * (A * C);

  if (SqrtTerm.isNegative()) {
    // The loop is provably infinite.
    return None;
  }

  // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal = SqrtTerm.sqrt();

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
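  //
  // An illustrative check of the steps below (values chosen for this
  // comment): the chrec {2,+,-2,+,2} expands to f(X) = X^2 - 3X + 2, i.e.
  // A = 1, B = -3, C = 2.  Then NegB = 3, SqrtVal = sqrt(9 - 8) = 1 and
  // TwoA = 2, giving the roots (3 + 1) / 2 = 2 and (3 - 1) / 2 = 1, and
  // f(1) = f(2) = 0 as expected.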
  APInt NegB = -std::move(B);
  APInt TwoA = std::move(A);
  TwoA <<= 1;
  if (TwoA.isNullValue())
    return None;

  LLVMContext &Context = SE.getContext();

  ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(cast<SCEVConstant>(SE.getConstant(Solution1)),
                        cast<SCEVConstant>(SE.getConstant(Solution2)));
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0.  We know and take advantage of the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    if (auto Roots = SolveQuadraticEquation(AddRec, *this)) {
      const SCEVConstant *R1 = Roots->first;
      const SCEVConstant *R2 = Roots->second;
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);  // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          // We found a quadratic root!
          return ExitLimit(R1, R1, false, Predicates);
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //     Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to
  // wrap to 0, it must be counting down to equal 0. Consequently,
  // N = Start / -Step.  We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a
/// condition guarding a loop, it can be useful to be a little more general,
/// since a front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical, and do not read memory, but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;

  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        goto trivially_true;
      else if (ExactCR.isEmptySet())
        goto trivially_false;

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages refer to the fact
      // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
      // should have fired on the corresponding cases, and canonicalized the
      // check to trivially_true or trivially_false.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;

trivially_true:
  // Return 0 == 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_EQ;
  return true;

trivially_false:
  // Return 0 != 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_NE;
  return true;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV, bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV, bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
         isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates); all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If one of the operands is loop-invariant, force it into the RHS;
  // otherwise bail out.
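  //
  // For example (illustrative SCEVs, not from any particular input): given
  // "%n > {0,+,1}" with %n invariant in L, the operands are swapped to
  // "{0,+,1} < %n" so that the addrec ends up on the LHS.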
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //  * if the predicate was false in the first iteration then the predicate
  //    is never evaluated again, since the loop exits without taking the
  //    backedge.
  //  * if the predicate was true in the first iteration then it will
  //    continue to be true for all future iterations since it is
  //    monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
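  //
  // For example (illustrative SCEVs, not from any particular input): matching
  // Result = (%x + 42)<nsw> against X = %x succeeds with OutY = 42, provided
  // the addition carries the expected <nsw> flag.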
9002 auto MatchBinaryAddToConst =
9003 [this](const SCEV *Result, const SCEV *X, APInt &OutY,
9004 SCEV::NoWrapFlags ExpectedFlags) {
9005 const SCEV *NonConstOp, *ConstOp;
9006 SCEV::NoWrapFlags FlagsPresent;
9007
9008 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9009 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9010 return false;
9011
9012 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9013 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9014 };
9015
9016 APInt C;
9017
9018 switch (Pred) {
9019 default:
9020 break;
9021
9022 case ICmpInst::ICMP_SGE:
9023 std::swap(LHS, RHS);
9024 LLVM_FALLTHROUGH;
9025 case ICmpInst::ICMP_SLE:
9026 // X s<= (X + C)<nsw> if C >= 0
9027 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
9028 return true;
9029
9030 // (X + C)<nsw> s<= X if C <= 0
9031 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
9032 !C.isStrictlyPositive())
9033 return true;
9034 break;
9035
9036 case ICmpInst::ICMP_SGT:
9037 std::swap(LHS, RHS);
9038 LLVM_FALLTHROUGH;
9039 case ICmpInst::ICMP_SLT:
9040 // X s< (X + C)<nsw> if C > 0
9041 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
9042 C.isStrictlyPositive())
9043 return true;
9044
9045 // (X + C)<nsw> s< X if C < 0
9046 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
9047 return true;
9048 break;
9049 }
9050
9051 return false;
9052 }
9053
9054 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
9055 const SCEV *LHS,
9056 const SCEV *RHS) {
9057 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
9058 return false;
9059
9060 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
9061 // on the stack can result in exponential time complexity.
9062 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
9063
9064 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
9065 //
9066 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
9067 // isKnownPredicate. isKnownPredicate is more powerful, but also more
9068 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9069 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9070 // use isKnownPredicate later if needed.
9071 return isKnownNonNegative(RHS) &&
9072 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9073 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9074 }
9075
9076 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
9077 ICmpInst::Predicate Pred,
9078 const SCEV *LHS, const SCEV *RHS) {
9079 // No need to even try if we know the module has no guards.
9080 if (!HasGuards)
9081 return false;
9082
9083 return any_of(*BB, [&](Instruction &I) {
9084 using namespace llvm::PatternMatch;
9085
9086 Value *Condition;
9087 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9088 m_Value(Condition))) &&
9089 isImpliedCond(Pred, LHS, RHS, Condition, false);
9090 });
9091 }
9092
9093 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9094 /// protected by a conditional between LHS and RHS. This is used
9095 /// to eliminate casts.
9096 bool
9097 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9098 ICmpInst::Predicate Pred,
9099 const SCEV *LHS, const SCEV *RHS) {
9100 // Interpret a null as meaning no loop, where there is obviously no guard
9101 // (interprocedural conditions notwithstanding).
9102 if (!L) return true;
9103
9104 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9105 return true;
9106
9107 BasicBlock *Latch = L->getLoopLatch();
9108 if (!Latch)
9109 return false;
9110
9111 BranchInst *LoopContinuePredicate =
9112 dyn_cast<BranchInst>(Latch->getTerminator());
9113 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9114 isImpliedCond(Pred, LHS, RHS,
9115 LoopContinuePredicate->getCondition(),
9116 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9117 return true;
9118
9119 // We don't want more than one activation of the following loops on the stack
9120 // -- that can lead to O(n!) time complexity.
9121 if (WalkingBEDominatingConds)
9122 return false;
9123
9124 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9125
9126 // See if we can exploit a trip count to prove the predicate.
9127 const auto &BETakenInfo = getBackedgeTakenInfo(L);
9128 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9129 if (LatchBECount != getCouldNotCompute()) {
9130 // We know that Latch branches back to the loop header exactly
9131 // LatchBECount times. This means the backedge condition at Latch is
9132 // equivalent to "{0,+,1} u< LatchBECount".
9133 Type *Ty = LatchBECount->getType();
9134 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9135 const SCEV *LoopCounter =
9136 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9137 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9138 LatchBECount))
9139 return true;
9140 }
9141
9142 // Check conditions due to any @llvm.assume intrinsics.
9143 for (auto &AssumeVH : AC.assumptions()) {
9144 if (!AssumeVH)
9145 continue;
9146 auto *CI = cast<CallInst>(AssumeVH);
9147 if (!DT.dominates(CI, Latch->getTerminator()))
9148 continue;
9149
9150 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
9151 return true;
9152 }
9153
9154 // If the loop is not reachable from the entry block, we risk running into an
9155 // infinite loop as we walk up into the dom tree. These loops do not matter
9156 // anyway, so we just return a conservative answer when we see them.
9157 if (!DT.isReachableFromEntry(L->getHeader()))
9158 return false;
9159
9160 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9161 return true;
9162
9163 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
9164 DTN != HeaderDTN; DTN = DTN->getIDom()) {
9165 assert(DTN && "should reach the loop header before reaching the root!");
9166
9167 BasicBlock *BB = DTN->getBlock();
9168 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9169 return true;
9170
9171 BasicBlock *PBB = BB->getSinglePredecessor();
9172 if (!PBB)
9173 continue;
9174
9175 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
9176 if (!ContinuePredicate || !ContinuePredicate->isConditional())
9177 continue;
9178
9179 Value *Condition = ContinuePredicate->getCondition();
9180
9181 // If we have an edge `E` within the loop body that dominates the only
9182 // latch, the condition guarding `E` also guards the backedge. This
9183 // reasoning works only for loops with a single latch.
9184
9185 BasicBlockEdge DominatingEdge(PBB, BB);
9186 if (DominatingEdge.isSingleEdge()) {
9187 // We're constructively (and conservatively) enumerating edges within the
9188 // loop body that dominate the latch.
The dominator tree better agree 9189 // with us on this: 9190 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9191 9192 if (isImpliedCond(Pred, LHS, RHS, Condition, 9193 BB != ContinuePredicate->getSuccessor(0))) 9194 return true; 9195 } 9196 } 9197 9198 return false; 9199 } 9200 9201 bool 9202 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9203 ICmpInst::Predicate Pred, 9204 const SCEV *LHS, const SCEV *RHS) { 9205 // Interpret a null as meaning no loop, where there is obviously no guard 9206 // (interprocedural conditions notwithstanding). 9207 if (!L) return false; 9208 9209 // Both LHS and RHS must be available at loop entry. 9210 assert(isAvailableAtLoopEntry(LHS, L) && 9211 "LHS is not available at Loop Entry"); 9212 assert(isAvailableAtLoopEntry(RHS, L) && 9213 "RHS is not available at Loop Entry"); 9214 9215 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9216 return true; 9217 9218 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9219 // the facts (a >= b && a != b) separately. A typical situation is when the 9220 // non-strict comparison is known from ranges and non-equality is known from 9221 // dominating predicates. If we are proving strict comparison, we always try 9222 // to prove non-equality and non-strict comparison separately. 9223 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9224 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9225 bool ProvedNonStrictComparison = false; 9226 bool ProvedNonEquality = false; 9227 9228 if (ProvingStrictComparison) { 9229 ProvedNonStrictComparison = 9230 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9231 ProvedNonEquality = 9232 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9233 if (ProvedNonStrictComparison && ProvedNonEquality) 9234 return true; 9235 } 9236 9237 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9238 auto ProveViaGuard = [&](BasicBlock *Block) { 9239 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9240 return true; 9241 if (ProvingStrictComparison) { 9242 if (!ProvedNonStrictComparison) 9243 ProvedNonStrictComparison = 9244 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9245 if (!ProvedNonEquality) 9246 ProvedNonEquality = 9247 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9248 if (ProvedNonStrictComparison && ProvedNonEquality) 9249 return true; 9250 } 9251 return false; 9252 }; 9253 9254 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9255 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9256 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9257 return true; 9258 if (ProvingStrictComparison) { 9259 if (!ProvedNonStrictComparison) 9260 ProvedNonStrictComparison = 9261 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9262 if (!ProvedNonEquality) 9263 ProvedNonEquality = 9264 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9265 if (ProvedNonStrictComparison && ProvedNonEquality) 9266 return true; 9267 } 9268 return false; 9269 }; 9270 9271 // Starting at the loop predecessor, climb up the predecessor chain, as long 9272 // as there are predecessors that can be found that have unique successors 9273 // leading to the original header. 
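// Every path into the loop must traverse each (predecessor, successor) edge
// found this way, so the direction a conditional branch takes out of such a
// predecessor yields a fact that holds on loop entry.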
9274 for (std::pair<BasicBlock *, BasicBlock *>
9275 Pair(L->getLoopPredecessor(), L->getHeader());
9276 Pair.first;
9277 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
9278
9279 if (ProveViaGuard(Pair.first))
9280 return true;
9281
9282 BranchInst *LoopEntryPredicate =
9283 dyn_cast<BranchInst>(Pair.first->getTerminator());
9284 if (!LoopEntryPredicate ||
9285 LoopEntryPredicate->isUnconditional())
9286 continue;
9287
9288 if (ProveViaCond(LoopEntryPredicate->getCondition(),
9289 LoopEntryPredicate->getSuccessor(0) != Pair.second))
9290 return true;
9291 }
9292
9293 // Check conditions due to any @llvm.assume intrinsics.
9294 for (auto &AssumeVH : AC.assumptions()) {
9295 if (!AssumeVH)
9296 continue;
9297 auto *CI = cast<CallInst>(AssumeVH);
9298 if (!DT.dominates(CI, L->getHeader()))
9299 continue;
9300
9301 if (ProveViaCond(CI->getArgOperand(0), false))
9302 return true;
9303 }
9304
9305 return false;
9306 }
9307
9308 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
9309 const SCEV *LHS, const SCEV *RHS,
9310 Value *FoundCondValue,
9311 bool Inverse) {
9312 if (!PendingLoopPredicates.insert(FoundCondValue).second)
9313 return false;
9314
9315 auto ClearOnExit =
9316 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
9317
9318 // Recursively handle And and Or conditions.
9319 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
9320 if (BO->getOpcode() == Instruction::And) {
9321 if (!Inverse)
9322 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9323 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9324 } else if (BO->getOpcode() == Instruction::Or) {
9325 if (Inverse)
9326 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9327 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9328 }
9329 }
9330
9331 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
9332 if (!ICI) return false;
9333
9334 // Now that we have found a conditional branch that dominates the loop or
9335 // controls the loop latch, check to see if it is the comparison we are looking for.
9336 ICmpInst::Predicate FoundPred;
9337 if (Inverse)
9338 FoundPred = ICI->getInversePredicate();
9339 else
9340 FoundPred = ICI->getPredicate();
9341
9342 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
9343 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
9344
9345 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
9346 }
9347
9348 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
9349 const SCEV *RHS,
9350 ICmpInst::Predicate FoundPred,
9351 const SCEV *FoundLHS,
9352 const SCEV *FoundRHS) {
9353 // Balance the types.
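// For example (illustrative), if LHS and RHS are i32 while FoundLHS and
// FoundRHS are i64, we widen LHS and RHS to i64, sign- or zero-extending
// according to the signedness of Pred; the opposite mismatch widens
// FoundLHS/FoundRHS instead, keyed off FoundPred.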
9354 if (getTypeSizeInBits(LHS->getType()) <
9355 getTypeSizeInBits(FoundLHS->getType())) {
9356 if (CmpInst::isSigned(Pred)) {
9357 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
9358 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
9359 } else {
9360 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
9361 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
9362 }
9363 } else if (getTypeSizeInBits(LHS->getType()) >
9364 getTypeSizeInBits(FoundLHS->getType())) {
9365 if (CmpInst::isSigned(FoundPred)) {
9366 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
9367 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
9368 } else {
9369 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
9370 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
9371 }
9372 }
9373
9374 // Canonicalize the query to match the way instcombine will have
9375 // canonicalized the comparison.
9376 if (SimplifyICmpOperands(Pred, LHS, RHS))
9377 if (LHS == RHS)
9378 return CmpInst::isTrueWhenEqual(Pred);
9379 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
9380 if (FoundLHS == FoundRHS)
9381 return CmpInst::isFalseWhenEqual(FoundPred);
9382
9383 // Check to see if we can make the LHS or RHS match.
9384 if (LHS == FoundRHS || RHS == FoundLHS) {
9385 if (isa<SCEVConstant>(RHS)) {
9386 std::swap(FoundLHS, FoundRHS);
9387 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
9388 } else {
9389 std::swap(LHS, RHS);
9390 Pred = ICmpInst::getSwappedPredicate(Pred);
9391 }
9392 }
9393
9394 // Check whether the found predicate is the same as the desired predicate.
9395 if (FoundPred == Pred)
9396 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
9397
9398 // Check whether swapping the found predicate makes it the same as the
9399 // desired predicate.
9400 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
9401 if (isa<SCEVConstant>(RHS))
9402 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
9403 else
9404 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
9405 RHS, LHS, FoundLHS, FoundRHS);
9406 }
9407
9408 // Unsigned comparison is the same as signed comparison when both operands
9409 // are non-negative.
9410 if (CmpInst::isUnsigned(FoundPred) &&
9411 CmpInst::getSignedPredicate(FoundPred) == Pred &&
9412 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
9413 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
9414
9415 // Check if we can make progress by sharpening ranges.
9416 if (FoundPred == ICmpInst::ICMP_NE &&
9417 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
9418
9419 const SCEVConstant *C = nullptr;
9420 const SCEV *V = nullptr;
9421
9422 if (isa<SCEVConstant>(FoundLHS)) {
9423 C = cast<SCEVConstant>(FoundLHS);
9424 V = FoundRHS;
9425 } else {
9426 C = cast<SCEVConstant>(FoundRHS);
9427 V = FoundLHS;
9428 }
9429
9430 // The guarding predicate tells us that C != V. If the known range
9431 // of V is [C, t), we can sharpen the range to [C + 1, t). The
9432 // range we consider has to correspond to the same signedness as the
9433 // predicate we're interested in folding.
9434
9435 APInt Min = ICmpInst::isSigned(Pred) ?
9436 getSignedRangeMin(V) : getUnsignedRangeMin(V);
9437
9438 if (Min == C->getAPInt()) {
9439 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
9440 // This is true even if (Min + 1) wraps around -- in case of
9441 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
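// For example (illustrative, unsigned): if the known range of V is [3, 50)
// and the guard gives V != 3, then V u>= 4, i.e. SharperMin below is 4.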
9442 9443 APInt SharperMin = Min + 1; 9444 9445 switch (Pred) { 9446 case ICmpInst::ICMP_SGE: 9447 case ICmpInst::ICMP_UGE: 9448 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9449 // RHS, we're done. 9450 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9451 getConstant(SharperMin))) 9452 return true; 9453 LLVM_FALLTHROUGH; 9454 9455 case ICmpInst::ICMP_SGT: 9456 case ICmpInst::ICMP_UGT: 9457 // We know from the range information that (V `Pred` Min || 9458 // V == Min). We know from the guarding condition that !(V 9459 // == Min). This gives us 9460 // 9461 // V `Pred` Min || V == Min && !(V == Min) 9462 // => V `Pred` Min 9463 // 9464 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9465 9466 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9467 return true; 9468 LLVM_FALLTHROUGH; 9469 9470 default: 9471 // No change 9472 break; 9473 } 9474 } 9475 } 9476 9477 // Check whether the actual condition is beyond sufficient. 9478 if (FoundPred == ICmpInst::ICMP_EQ) 9479 if (ICmpInst::isTrueWhenEqual(Pred)) 9480 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9481 return true; 9482 if (Pred == ICmpInst::ICMP_NE) 9483 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9484 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9485 return true; 9486 9487 // Otherwise assume the worst. 9488 return false; 9489 } 9490 9491 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9492 const SCEV *&L, const SCEV *&R, 9493 SCEV::NoWrapFlags &Flags) { 9494 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9495 if (!AE || AE->getNumOperands() != 2) 9496 return false; 9497 9498 L = AE->getOperand(0); 9499 R = AE->getOperand(1); 9500 Flags = AE->getNoWrapFlags(); 9501 return true; 9502 } 9503 9504 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9505 const SCEV *Less) { 9506 // We avoid subtracting expressions here because this function is usually 9507 // fairly deep in the call stack (i.e. is called many times). 9508 9509 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9510 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9511 const auto *MAR = cast<SCEVAddRecExpr>(More); 9512 9513 if (LAR->getLoop() != MAR->getLoop()) 9514 return None; 9515 9516 // We look at affine expressions only; not for correctness but to keep 9517 // getStepRecurrence cheap. 9518 if (!LAR->isAffine() || !MAR->isAffine()) 9519 return None; 9520 9521 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9522 return None; 9523 9524 Less = LAR->getStart(); 9525 More = MAR->getStart(); 9526 9527 // fall through 9528 } 9529 9530 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9531 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9532 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9533 return M - L; 9534 } 9535 9536 SCEV::NoWrapFlags Flags; 9537 const SCEV *LLess = nullptr, *RLess = nullptr; 9538 const SCEV *LMore = nullptr, *RMore = nullptr; 9539 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9540 // Compare (X + C1) vs X. 9541 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9542 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9543 if (RLess == More) 9544 return -(C1->getAPInt()); 9545 9546 // Compare X vs (X + C2). 9547 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9548 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9549 if (RMore == Less) 9550 return C2->getAPInt(); 9551 9552 // Compare (X + C1) vs (X + C2). 
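// For example (illustrative): Less = (%x + 3) and More = (%x + 7) give
// C1 = 3, C2 = 7, and a constant difference More - Less of 4.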
9553 if (C1 && C2 && RLess == RMore) 9554 return C2->getAPInt() - C1->getAPInt(); 9555 9556 return None; 9557 } 9558 9559 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9560 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9561 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9562 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9563 return false; 9564 9565 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9566 if (!AddRecLHS) 9567 return false; 9568 9569 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9570 if (!AddRecFoundLHS) 9571 return false; 9572 9573 // We'd like to let SCEV reason about control dependencies, so we constrain 9574 // both the inequalities to be about add recurrences on the same loop. This 9575 // way we can use isLoopEntryGuardedByCond later. 9576 9577 const Loop *L = AddRecFoundLHS->getLoop(); 9578 if (L != AddRecLHS->getLoop()) 9579 return false; 9580 9581 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9582 // 9583 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9584 // ... (2) 9585 // 9586 // Informal proof for (2), assuming (1) [*]: 9587 // 9588 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9589 // 9590 // Then 9591 // 9592 // FoundLHS s< FoundRHS s< INT_MIN - C 9593 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9594 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9595 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9596 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9597 // <=> FoundLHS + C s< FoundRHS + C 9598 // 9599 // [*]: (1) can be proved by ruling out overflow. 9600 // 9601 // [**]: This can be proved by analyzing all the four possibilities: 9602 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9603 // (A s>= 0, B s>= 0). 9604 // 9605 // Note: 9606 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9607 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9608 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9609 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9610 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9611 // C)". 9612 9613 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9614 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9615 if (!LDiff || !RDiff || *LDiff != *RDiff) 9616 return false; 9617 9618 if (LDiff->isMinValue()) 9619 return true; 9620 9621 APInt FoundRHSLimit; 9622 9623 if (Pred == CmpInst::ICMP_ULT) { 9624 FoundRHSLimit = -(*RDiff); 9625 } else { 9626 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9627 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9628 } 9629 9630 // Try to prove (1) or (2), as needed. 
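// (Concretely, with C = *RDiff: for ICMP_ULT we require FoundRHS u< -C so
// that rule (1) applies; for ICMP_SLT we require FoundRHS s< INT_MIN - C so
// that rule (2) applies.)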
9631 return isAvailableAtLoopEntry(FoundRHS, L) &&
9632 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
9633 getConstant(FoundRHSLimit));
9634 }
9635
9636 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
9637 const SCEV *LHS, const SCEV *RHS,
9638 const SCEV *FoundLHS,
9639 const SCEV *FoundRHS, unsigned Depth) {
9640 const PHINode *LPhi = nullptr, *RPhi = nullptr;
9641
9642 auto ClearOnExit = make_scope_exit([&]() {
9643 if (LPhi) {
9644 bool Erased = PendingMerges.erase(LPhi);
9645 assert(Erased && "Failed to erase LPhi!");
9646 (void)Erased;
9647 }
9648 if (RPhi) {
9649 bool Erased = PendingMerges.erase(RPhi);
9650 assert(Erased && "Failed to erase RPhi!");
9651 (void)Erased;
9652 }
9653 });
9654
9655 // Find the respective Phis and check that they are not already pending.
9656 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
9657 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
9658 if (!PendingMerges.insert(Phi).second)
9659 return false;
9660 LPhi = Phi;
9661 }
9662 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
9663 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
9664 // If we detect a loop of Phi nodes being processed by this method, for
9665 // example:
9666 //
9667 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
9668 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
9669 //
9670 // we don't want to deal with a case that complex, so return the
9671 // conservative answer false.
9672 if (!PendingMerges.insert(Phi).second)
9673 return false;
9674 RPhi = Phi;
9675 }
9676
9677 // If neither LHS nor RHS is a Phi, there is nothing to do here.
9678 if (!LPhi && !RPhi)
9679 return false;
9680
9681 // If there is a SCEVUnknown Phi we are interested in, make it the left one.
9682 if (!LPhi) {
9683 std::swap(LHS, RHS);
9684 std::swap(FoundLHS, FoundRHS);
9685 std::swap(LPhi, RPhi);
9686 Pred = ICmpInst::getSwappedPredicate(Pred);
9687 }
9688
9689 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
9690 const BasicBlock *LBB = LPhi->getParent();
9691 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
9692
9693 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
9694 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
9695 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
9696 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
9697 };
9698
9699 if (RPhi && RPhi->getParent() == LBB) {
9700 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
9701 // If we compare two Phis from the same block, and for each entry block
9702 // the predicate is true for incoming values from this block, then the
9703 // predicate is also true for the Phis.
9704 for (const BasicBlock *IncBB : predecessors(LBB)) {
9705 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
9706 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
9707 if (!ProvedEasily(L, R))
9708 return false;
9709 }
9710 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
9711 // Case two: RHS is an AddRec whose loop has LBB as its header. It means
9712 // that there is a loop which has both AddRec and Unknown PHIs; for such a
9713 // loop we can compare the AddRec's values from above the loop and from the
9714 // latch with the respective incoming values of LPhi.
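// For example (illustrative), in the loop header LBB:
//
// %iv = phi i32 [ %start, %preheader ], [ %iv.next, %latch ] ; the AddRec
// %p = phi i32 [ %x, %preheader ], [ %y, %latch ] ; LPhi
//
// it suffices to prove %x `Pred` %start and %y `Pred` the post-increment
// value of the AddRec, which is what the code below does.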
9715 assert(LPhi->getNumIncomingValues() == 2 &&
9716 "Phi node standing in loop header does not have exactly 2 inputs?");
9717 auto *RLoop = RAR->getLoop();
9718 auto *Predecessor = RLoop->getLoopPredecessor();
9719 assert(Predecessor && "Loop with AddRec with no predecessor?");
9720 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
9721 if (!ProvedEasily(L1, RAR->getStart()))
9722 return false;
9723 auto *Latch = RLoop->getLoopLatch();
9724 assert(Latch && "Loop with AddRec with no latch?");
9725 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
9726 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
9727 return false;
9728 } else {
9729 // In all other cases go over the inputs of LHS and compare each of them to
9730 // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
9731 // At this point RHS is either a non-Phi, or it is a Phi from some block
9732 // different from LBB.
9733 for (const BasicBlock *IncBB : predecessors(LBB)) {
9734 // Check that RHS is available in this block.
9735 if (!dominates(RHS, IncBB))
9736 return false;
9737 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
9738 if (!ProvedEasily(L, RHS))
9739 return false;
9740 }
9741 }
9742 return true;
9743 }
9744
9745 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
9746 const SCEV *LHS, const SCEV *RHS,
9747 const SCEV *FoundLHS,
9748 const SCEV *FoundRHS) {
9749 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
9750 return true;
9751
9752 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
9753 return true;
9754
9755 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
9756 FoundLHS, FoundRHS) ||
9757 // ~x < ~y --> x > y
9758 isImpliedCondOperandsHelper(Pred, LHS, RHS,
9759 getNotSCEV(FoundRHS),
9760 getNotSCEV(FoundLHS));
9761 }
9762
9763 /// If Expr computes ~A, return A; else return nullptr.
9764 static const SCEV *MatchNotExpr(const SCEV *Expr) {
9765 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
9766 if (!Add || Add->getNumOperands() != 2 ||
9767 !Add->getOperand(0)->isAllOnesValue())
9768 return nullptr;
9769
9770 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
9771 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
9772 !AddRHS->getOperand(0)->isAllOnesValue())
9773 return nullptr;
9774
9775 return AddRHS->getOperand(1);
9776 }
9777
9778 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
9779 template<typename MaxExprType>
9780 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
9781 const SCEV *Candidate) {
9782 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
9783 if (!MaxExpr) return false;
9784
9785 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
9786 }
9787
9788 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
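/// (SCEV has no dedicated min expression nodes at this point; a min is
/// encoded as ~smax(~A, ~B), resp. umax, which is why MatchNotExpr is
/// applied first below.)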
9789 template<typename MaxExprType>
9790 static bool IsMinConsistingOf(ScalarEvolution &SE,
9791 const SCEV *MaybeMinExpr,
9792 const SCEV *Candidate) {
9793 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
9794 if (!MaybeMaxExpr)
9795 return false;
9796
9797 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
9798 }
9799
9800 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
9801 ICmpInst::Predicate Pred,
9802 const SCEV *LHS, const SCEV *RHS) {
9803 // If both sides are affine addrecs for the same loop, with equal
9804 // steps, and we know the recurrences don't wrap, then we only
9805 // need to check the predicate on the starting values.
9806
9807 if (!ICmpInst::isRelational(Pred))
9808 return false;
9809
9810 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
9811 if (!LAR)
9812 return false;
9813 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
9814 if (!RAR)
9815 return false;
9816 if (LAR->getLoop() != RAR->getLoop())
9817 return false;
9818 if (!LAR->isAffine() || !RAR->isAffine())
9819 return false;
9820
9821 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
9822 return false;
9823
9824 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
9825 SCEV::FlagNSW : SCEV::FlagNUW;
9826 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
9827 return false;
9828
9829 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
9830 }
9831
9832 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
9833 /// expression?
9834 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
9835 ICmpInst::Predicate Pred,
9836 const SCEV *LHS, const SCEV *RHS) {
9837 switch (Pred) {
9838 default:
9839 return false;
9840
9841 case ICmpInst::ICMP_SGE:
9842 std::swap(LHS, RHS);
9843 LLVM_FALLTHROUGH;
9844 case ICmpInst::ICMP_SLE:
9845 return
9846 // min(A, ...) <= A
9847 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
9848 // A <= max(A, ...)
9849 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
9850
9851 case ICmpInst::ICMP_UGE:
9852 std::swap(LHS, RHS);
9853 LLVM_FALLTHROUGH;
9854 case ICmpInst::ICMP_ULE:
9855 return
9856 // min(A, ...) <= A
9857 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
9858 // A <= max(A, ...)
9859 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
9860 }
9861
9862 llvm_unreachable("covered switch fell through?!");
9863 }
9864
9865 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
9866 const SCEV *LHS, const SCEV *RHS,
9867 const SCEV *FoundLHS,
9868 const SCEV *FoundRHS,
9869 unsigned Depth) {
9870 assert(getTypeSizeInBits(LHS->getType()) ==
9871 getTypeSizeInBits(RHS->getType()) &&
9872 "LHS and RHS have different sizes?");
9873 assert(getTypeSizeInBits(FoundLHS->getType()) ==
9874 getTypeSizeInBits(FoundRHS->getType()) &&
9875 "FoundLHS and FoundRHS have different sizes?");
9876 // We want to avoid hurting compile time by analyzing overly large trees.
9877 if (Depth > MaxSCEVOperationsImplicationDepth)
9878 return false;
9879 // We only want to work with ICMP_SGT comparison so far.
9880 // TODO: Extend to ICMP_UGT?
9881 if (Pred == ICmpInst::ICMP_SLT) {
9882 Pred = ICmpInst::ICMP_SGT;
9883 std::swap(LHS, RHS);
9884 std::swap(FoundLHS, FoundRHS);
9885 }
9886 if (Pred != ICmpInst::ICMP_SGT)
9887 return false;
9888
9889 auto GetOpFromSExt = [&](const SCEV *S) {
9890 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
9891 return Ext->getOperand();
9892 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
9893 // the constant in some cases.
9894 return S;
9895 };
9896
9897 // Acquire values from extensions.
9898 auto *OrigLHS = LHS;
9899 auto *OrigFoundLHS = FoundLHS;
9900 LHS = GetOpFromSExt(LHS);
9901 FoundLHS = GetOpFromSExt(FoundLHS);
9902
9903 // Check whether the SGT predicate can be proved trivially or using the found context.
9904 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
9905 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
9906 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
9907 FoundRHS, Depth + 1);
9908 };
9909
9910 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
9911 // We want to avoid creation of any new non-constant SCEV. Since we are
9912 // going to compare the operands to RHS, we should be certain that we don't
9913 // need any size extensions for this. So let's decline all cases when the
9914 // sizes of types of LHS and RHS do not match.
9915 // TODO: Maybe try to get RHS from sext to catch more cases?
9916 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
9917 return false;
9918
9919 // Should not overflow.
9920 if (!LHSAddExpr->hasNoSignedWrap())
9921 return false;
9922
9923 auto *LL = LHSAddExpr->getOperand(0);
9924 auto *LR = LHSAddExpr->getOperand(1);
9925 auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
9926
9927 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
9928 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
9929 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
9930 };
9931 // Try to prove the following rule:
9932 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
9933 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
9934 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
9935 return true;
9936 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
9937 Value *LL, *LR;
9938 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
9939
9940 using namespace llvm::PatternMatch;
9941
9942 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
9943 // Rules for division.
9944 // We are going to perform some comparisons with Denominator and its
9945 // derivative expressions. In the general case, creating a SCEV for it may
9946 // lead to a complex analysis of the entire graph, and in particular it
9947 // can request trip count recalculation for the same loop. This would be
9948 // cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
9949 // this, we only want to create SCEVs that are constants in this section.
9950 // So we bail if Denominator is not a constant.
9951 if (!isa<ConstantInt>(LR))
9952 return false;
9953
9954 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
9955
9956 // We want to make sure that LHS = FoundLHS / Denominator. If so, then a
9957 // SCEV for the numerator already exists and matches FoundLHS.
9958 auto *Numerator = getExistingSCEV(LL);
9959 if (!Numerator || Numerator->getType() != FoundLHS->getType())
9960 return false;
9961
9962 // Make sure that the numerator matches with FoundLHS and the denominator
9963 // is positive.
9964 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
9965 return false;
9966
9967 auto *DTy = Denominator->getType();
9968 auto *FRHSTy = FoundRHS->getType();
9969 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
9970 // One of the types is a pointer and the other is not. We cannot extend
9971 // them properly to a wider type, so we just reject this case.
9972 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
9973 // to avoid this check.
9974 return false;
9975
9976 // Given that:
9977 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
9978 auto *WTy = getWiderType(DTy, FRHSTy);
9979 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
9980 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
9981
9982 // Try to prove the following rule:
9983 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
9984 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
9985 // divide it by Denominator < 4, we will have at least 1.
9986 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
9987 if (isKnownNonPositive(RHS) &&
9988 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
9989 return true;
9990
9991 // Try to prove the following rule:
9992 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
9993 // For example, given that FoundLHS > -3, FoundLHS is at least -2.
9994 // If we divide it by Denominator > 2, then:
9995 // 1. If FoundLHS is negative, then the result is 0.
9996 // 2. If FoundLHS is non-negative, then the result is non-negative.
9997 // Either way, the result is non-negative.
9998 auto *MinusOne = getNegativeSCEV(getOne(WTy));
9999 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10000 if (isKnownNegative(RHS) &&
10001 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10002 return true;
10003 }
10004 }
10005
10006 // If our expression contained SCEVUnknown Phis, and we split it down and now
10007 // need to prove something for them, try to prove the predicate for all
10008 // possible incoming values of those Phis.
10009 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10010 return true;
10011
10012 return false;
10013 }
10014
10015 bool
10016 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
10017 const SCEV *LHS, const SCEV *RHS) {
10018 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10019 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
10020 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
10021 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10022 }
10023
10024 bool
10025 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
10026 const SCEV *LHS, const SCEV *RHS,
10027 const SCEV *FoundLHS,
10028 const SCEV *FoundRHS) {
10029 switch (Pred) {
10030 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
10031 case ICmpInst::ICMP_EQ:
10032 case ICmpInst::ICMP_NE:
10033 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
10034 return true;
10035 break;
10036 case ICmpInst::ICMP_SLT:
10037 case ICmpInst::ICMP_SLE:
10038 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
10039 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
10040 return true;
10041 break;
10042 case ICmpInst::ICMP_SGT:
10043 case ICmpInst::ICMP_SGE:
10044 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
10045 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
10046 return true;
10047 break;
10048 case ICmpInst::ICMP_ULT:
10049 case ICmpInst::ICMP_ULE:
10050 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
10051 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
10052 return true;
10053 break;
10054 case ICmpInst::ICMP_UGT:
10055 case ICmpInst::ICMP_UGE:
10056 if
(isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
10057 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10058 return true;
10059 break;
10060 }
10061
10062 // Maybe it can be proved via operations?
10063 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10064 return true;
10065
10066 return false;
10067 }
10068
10069 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10070 const SCEV *LHS,
10071 const SCEV *RHS,
10072 const SCEV *FoundLHS,
10073 const SCEV *FoundRHS) {
10074 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10075 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10076 // reduce the compile-time impact of this optimization.
10077 return false;
10078
10079 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
10080 if (!Addend)
10081 return false;
10082
10083 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10084
10085 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
10086 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
10087 ConstantRange FoundLHSRange =
10088 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
10089
10090 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
10091 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
10092
10093 // We can also compute the range of values for `LHS` that satisfy the
10094 // consequent, "`LHS` `Pred` `RHS`":
10095 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
10096 ConstantRange SatisfyingLHSRange =
10097 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
10098
10099 // The antecedent implies the consequent if every value of `LHS` that
10100 // satisfies the antecedent also satisfies the consequent.
10101 return SatisfyingLHSRange.contains(LHSRange);
10102 }
10103
10104 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
10105 bool IsSigned, bool NoWrap) {
10106 assert(isKnownPositive(Stride) && "Positive stride expected!");
10107
10108 if (NoWrap) return false;
10109
10110 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10111 const SCEV *One = getOne(Stride->getType());
10112
10113 if (IsSigned) {
10114 APInt MaxRHS = getSignedRangeMax(RHS);
10115 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
10116 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10117
10118 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
10119 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
10120 }
10121
10122 APInt MaxRHS = getUnsignedRangeMax(RHS);
10123 APInt MaxValue = APInt::getMaxValue(BitWidth);
10124 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
10125
10126 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
10127 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
10128 }
10129
10130 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
10131 bool IsSigned, bool NoWrap) {
10132 if (NoWrap) return false;
10133
10134 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10135 const SCEV *One = getOne(Stride->getType());
10136
10137 if (IsSigned) {
10138 APInt MinRHS = getSignedRangeMin(RHS);
10139 APInt MinValue = APInt::getSignedMinValue(BitWidth);
10140 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10141
10142 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
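// For example (illustrative, i8): SMinRHS = -120 and SMaxStrideMinusOne = 10
// give -128 + 10 = -118 s> -120, so a single step could cross SMinValue.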
10143 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10144 } 10145 10146 APInt MinRHS = getUnsignedRangeMin(RHS); 10147 APInt MinValue = APInt::getMinValue(BitWidth); 10148 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10149 10150 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10151 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10152 } 10153 10154 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10155 bool Equality) { 10156 const SCEV *One = getOne(Step->getType()); 10157 Delta = Equality ? getAddExpr(Delta, Step) 10158 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10159 return getUDivExpr(Delta, Step); 10160 } 10161 10162 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10163 const SCEV *Stride, 10164 const SCEV *End, 10165 unsigned BitWidth, 10166 bool IsSigned) { 10167 10168 assert(!isKnownNonPositive(Stride) && 10169 "Stride is expected strictly positive!"); 10170 // Calculate the maximum backedge count based on the range of values 10171 // permitted by Start, End, and Stride. 10172 const SCEV *MaxBECount; 10173 APInt MinStart = 10174 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10175 10176 APInt StrideForMaxBECount = 10177 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10178 10179 // We already know that the stride is positive, so we paper over conservatism 10180 // in our range computation by forcing StrideForMaxBECount to be at least one. 10181 // In theory this is unnecessary, but we expect MaxBECount to be a 10182 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10183 // is nothing to constant fold it to). 10184 APInt One(BitWidth, 1, IsSigned); 10185 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10186 10187 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10188 : APInt::getMaxValue(BitWidth); 10189 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10190 10191 // Although End can be a MAX expression we estimate MaxEnd considering only 10192 // the case End = RHS of the loop termination condition. This is safe because 10193 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10194 // taken count. 10195 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 10196 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10197 10198 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10199 getConstant(StrideForMaxBECount) /* Step */, 10200 false /* Equality */); 10201 10202 return MaxBECount; 10203 } 10204 10205 ScalarEvolution::ExitLimit 10206 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10207 const Loop *L, bool IsSigned, 10208 bool ControlsExit, bool AllowPredicates) { 10209 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10210 10211 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10212 bool PredicatedIV = false; 10213 10214 if (!IV && AllowPredicates) { 10215 // Try to make this an AddRec using runtime tests, in the first X 10216 // iterations of this loop, where X is the SCEV expression found by the 10217 // algorithm below. 10218 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10219 PredicatedIV = true; 10220 } 10221 10222 // Avoid weird loops 10223 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10224 return getCouldNotCompute(); 10225 10226 bool NoWrap = ControlsExit && 10227 IV->getNoWrapFlags(IsSigned ? 
SCEV::FlagNSW : SCEV::FlagNUW);
10228
10229 const SCEV *Stride = IV->getStepRecurrence(*this);
10230
10231 bool PositiveStride = isKnownPositive(Stride);
10232
10233 // Avoid negative or zero stride values.
10234 if (!PositiveStride) {
10235 // We can compute the correct backedge taken count for loops with unknown
10236 // strides if we can prove that the loop is not an infinite loop with side
10237 // effects. Here's the loop structure we are trying to handle -
10238 //
10239 // i = start
10240 // do {
10241 // A[i] = i;
10242 // i += s;
10243 // } while (i < end);
10244 //
10245 // The backedge taken count for such loops is evaluated as -
10246 // (max(end, start + stride) - start - 1) /u stride
10247 //
10248 // The additional preconditions that we need to check to prove correctness
10249 // of the above formula are as follows -
10250 //
10251 // a) IV is either nuw or nsw depending upon signedness (indicated by the
10252 // NoWrap flag).
10253 // b) the loop is single-exit with no side effects.
10254 //
10255 //
10256 // Precondition a) implies that if the stride is negative, this is a single
10257 // trip loop. The backedge taken count formula reduces to zero in this case.
10258 //
10259 // Precondition b) implies that the unknown stride cannot be zero; otherwise
10260 // we have UB.
10261 //
10262 // The positive stride case is the same as isKnownPositive(Stride) returning
10263 // true (original behavior of the function).
10264 //
10265 // We want to make sure that the stride is truly unknown as there are edge
10266 // cases where ScalarEvolution propagates no wrap flags to the
10267 // post-increment/decrement IV even though the increment/decrement operation
10268 // itself is wrapping. The computed backedge taken count may be wrong in
10269 // such cases. This is prevented by checking that the stride is not known to
10270 // be either positive or non-positive. For example, no wrap flags are
10271 // propagated to the post-increment IV of this loop with a trip count of 2 -
10272 //
10273 // unsigned char i;
10274 // for(i=127; i<128; i+=129)
10275 // A[i] = i;
10276 //
10277 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
10278 !loopHasNoSideEffects(L))
10279 return getCouldNotCompute();
10280 } else if (!Stride->isOne() &&
10281 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
10282 // Avoid proven overflow cases: this will ensure that the backedge taken
10283 // count will not generate any unsigned overflow. Relaxed no-overflow
10284 // conditions exploit NoWrapFlags, allowing us to optimize in the presence
10285 // of undefined behavior, as in C.
10286 return getCouldNotCompute();
10287
10288 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
10289 : ICmpInst::ICMP_ULT;
10290 const SCEV *Start = IV->getStart();
10291 const SCEV *End = RHS;
10292 // When the RHS is not invariant, we do not know the end bound of the loop and
10293 // cannot calculate the ExactBECount needed by ExitLimit. However, we can
10294 // calculate the MaxBECount, given the start, stride and max value for the end
10295 // bound of the loop (RHS), and the fact that IV does not overflow (which is
10296 // checked above).
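// For example (illustrative), a loop like "for (i = 0; i < a[i]; ++i)" has a
// varying exit bound, yet the value range of that bound still caps the
// maximum backedge-taken count.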
10297 if (!isLoopInvariant(RHS, L)) {
10298 const SCEV *MaxBECount = computeMaxBECountForLT(
10299 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
10300 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
10301 false /*MaxOrZero*/, Predicates);
10302 }
10303 // If the backedge is taken at least once, then it will be taken
10304 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
10305 // is the LHS value of the less-than comparison the first time it is evaluated
10306 // and End is the RHS.
10307 const SCEV *BECountIfBackedgeTaken =
10308 computeBECount(getMinusSCEV(End, Start), Stride, false);
10309 // If the loop entry is guarded by the result of the backedge test of the
10310 // first loop iteration, then we know the backedge will be taken at least
10311 // once and so the backedge taken count is as above. If not, then we use the
10312 // expression (max(End,Start)-Start)/Stride to describe the backedge count:
10313 // if the backedge is taken at least once, max(End,Start) is End and the
10314 // result is as above; and if not, max(End,Start) is Start, so we get a
10315 // backedge count of zero.
10316 const SCEV *BECount;
10317 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
10318 BECount = BECountIfBackedgeTaken;
10319 else {
10320 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
10321 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
10322 }
10323
10324 const SCEV *MaxBECount;
10325 bool MaxOrZero = false;
10326 if (isa<SCEVConstant>(BECount))
10327 MaxBECount = BECount;
10328 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
10329 // If we know exactly how many times the backedge will be taken if it's
10330 // taken at least once, then the backedge count will either be that or
10331 // zero.
10332 MaxBECount = BECountIfBackedgeTaken;
10333 MaxOrZero = true;
10334 } else {
10335 MaxBECount = computeMaxBECountForLT(
10336 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
10337 }
10338
10339 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
10340 !isa<SCEVCouldNotCompute>(BECount))
10341 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
10342
10343 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
10344 }
10345
10346 ScalarEvolution::ExitLimit
10347 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
10348 const Loop *L, bool IsSigned,
10349 bool ControlsExit, bool AllowPredicates) {
10350 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10351 // We handle only IV > Invariant.
10352 if (!isLoopInvariant(RHS, L))
10353 return getCouldNotCompute();
10354
10355 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
10356 if (!IV && AllowPredicates)
10357 // Try to make this an AddRec using runtime tests, in the first X
10358 // iterations of this loop, where X is the SCEV expression found by the
10359 // algorithm below.
10360 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10361
10362 // Avoid weird loops
10363 if (!IV || IV->getLoop() != L || !IV->isAffine())
10364 return getCouldNotCompute();
10365
10366 bool NoWrap = ControlsExit &&
10367 IV->getNoWrapFlags(IsSigned ?
SCEV::FlagNSW : SCEV::FlagNUW);
10368
10369 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
10370
10371 // Avoid negative or zero stride values
10372 if (!isKnownPositive(Stride))
10373 return getCouldNotCompute();
10374
10375 // Avoid proven overflow cases: this will ensure that the backedge taken count
10376 // will not generate any unsigned overflow. Relaxed no-overflow conditions
10377 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined
10378 // behavior, as in C.
10379 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
10380 return getCouldNotCompute();
10381
10382 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
10383 : ICmpInst::ICMP_UGT;
10384
10385 const SCEV *Start = IV->getStart();
10386 const SCEV *End = RHS;
10387 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
10388 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
10389
10390 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
10391
10392 APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
10393 : getUnsignedRangeMax(Start);
10394
10395 APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
10396 : getUnsignedRangeMin(Stride);
10397
10398 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
10399 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
10400 : APInt::getMinValue(BitWidth) + (MinStride - 1);
10401
10402 // Although End can be a MIN expression we estimate MinEnd considering only
10403 // the case End = RHS. This is safe because in the other case (Start - End)
10404 // is zero, leading to a zero maximum backedge taken count.
10405 APInt MinEnd =
10406 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
10407 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
10408
10409
10410 const SCEV *MaxBECount = getCouldNotCompute();
10411 if (isa<SCEVConstant>(BECount))
10412 MaxBECount = BECount;
10413 else
10414 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
10415 getConstant(MinStride), false);
10416
10417 if (isa<SCEVCouldNotCompute>(MaxBECount))
10418 MaxBECount = BECount;
10419
10420 return ExitLimit(BECount, MaxBECount, false, Predicates);
10421 }
10422
10423 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
10424 ScalarEvolution &SE) const {
10425 if (Range.isFullSet()) // Infinite loop.
10426 return SE.getCouldNotCompute();
10427
10428 // If the start is a non-zero constant, shift the range to simplify things.
10429 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
10430 if (!SC->getValue()->isZero()) {
10431 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
10432 Operands[0] = SE.getZero(SC->getType());
10433 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
10434 getNoWrapFlags(FlagNW));
10435 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
10436 return ShiftedAddRec->getNumIterationsInRange(
10437 Range.subtract(SC->getAPInt()), SE);
10438 // This is strange and shouldn't happen.
10439 return SE.getCouldNotCompute();
10440 }
10441
10442 // The only time we can solve this is when we have all constant indices.
10443 // Otherwise, we cannot determine the overflow conditions.
10444 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
10445 return SE.getCouldNotCompute();
10446
10447 // Okay, at this point we know that all elements of the chrec are constants
10448 // and that the start element is zero.
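// For example (illustrative): {0,+,3} against Range = [0, 10) takes the
// values 0, 3, 6, 9, 12, ...; the affine case below computes the first
// iteration whose value leaves the range, here (9 + 3) /u 3 = 4.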
10449 10450 // First check to see if the range contains zero. If not, the first 10451 // iteration exits. 10452 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10453 if (!Range.contains(APInt(BitWidth, 0))) 10454 return SE.getZero(getType()); 10455 10456 if (isAffine()) { 10457 // If this is an affine expression then we have this situation: 10458 // Solve {0,+,A} in Range === Ax in Range 10459 10460 // We know that zero is in the range. If A is positive then we know that 10461 // the upper value of the range must be the first possible exit value. 10462 // If A is negative then the lower of the range is the last possible loop 10463 // value. Also note that we already checked for a full range. 10464 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10465 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10466 10467 // The exit value should be (End+A)/A. 10468 APInt ExitVal = (End + A).udiv(A); 10469 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10470 10471 // Evaluate at the exit value. If we really did fall out of the valid 10472 // range, then we computed our trip count, otherwise wrap around or other 10473 // things must have happened. 10474 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10475 if (Range.contains(Val->getValue())) 10476 return SE.getCouldNotCompute(); // Something strange happened 10477 10478 // Ensure that the previous value is in the range. This is a sanity check. 10479 assert(Range.contains( 10480 EvaluateConstantChrecAtConstant(this, 10481 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10482 "Linear scev computation is off in a bad way!"); 10483 return SE.getConstant(ExitValue); 10484 } else if (isQuadratic()) { 10485 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 10486 // quadratic equation to solve it. To do this, we must frame our problem in 10487 // terms of figuring out when zero is crossed, instead of when 10488 // Range.getUpper() is crossed. 10489 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 10490 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 10491 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(), FlagAnyWrap); 10492 10493 // Next, solve the constructed addrec 10494 if (auto Roots = 10495 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE)) { 10496 const SCEVConstant *R1 = Roots->first; 10497 const SCEVConstant *R2 = Roots->second; 10498 // Pick the smallest positive root value. 10499 if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( 10500 ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { 10501 if (!CB->getZExtValue()) 10502 std::swap(R1, R2); // R1 is the minimum root now. 10503 10504 // Make sure the root is not off by one. The returned iteration should 10505 // not be in the range, but the previous one should be. When solving 10506 // for "X*X < 5", for example, we should not return a root of 2. 10507 ConstantInt *R1Val = 10508 EvaluateConstantChrecAtConstant(this, R1->getValue(), SE); 10509 if (Range.contains(R1Val->getValue())) { 10510 // The next iteration must be out of the range... 
          ConstantInt *NextVal =
              ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+,...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    else if (const auto *SC = dyn_cast<SCEVConstant>(S))
      return isa<UndefValue>(SC->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we have found an AddRec, do not walk its
      // operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec Visitor(ContainsAddRec);
          visitAll(Op, Visitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.empty())
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
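///
/// As an illustrative sketch (not drawn from a particular test case): for an
/// access A[i * n] with 4-byte elements, whose address is the AddRec
/// {%A,+,(4 * %n)}<%loop>, step 1) collects the stride (4 * %n), and the term
/// collection then records the product (4 * %n) as a parametric term because
/// it contains the SCEVUnknown %n.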
10696 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10697 SmallVectorImpl<const SCEV *> &Terms) { 10698 SmallVector<const SCEV *, 4> Strides; 10699 SCEVCollectStrides StrideCollector(*this, Strides); 10700 visitAll(Expr, StrideCollector); 10701 10702 LLVM_DEBUG({ 10703 dbgs() << "Strides:\n"; 10704 for (const SCEV *S : Strides) 10705 dbgs() << *S << "\n"; 10706 }); 10707 10708 for (const SCEV *S : Strides) { 10709 SCEVCollectTerms TermCollector(Terms); 10710 visitAll(S, TermCollector); 10711 } 10712 10713 LLVM_DEBUG({ 10714 dbgs() << "Terms:\n"; 10715 for (const SCEV *T : Terms) 10716 dbgs() << *T << "\n"; 10717 }); 10718 10719 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10720 visitAll(Expr, MulCollector); 10721 } 10722 10723 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10724 SmallVectorImpl<const SCEV *> &Terms, 10725 SmallVectorImpl<const SCEV *> &Sizes) { 10726 int Last = Terms.size() - 1; 10727 const SCEV *Step = Terms[Last]; 10728 10729 // End of recursion. 10730 if (Last == 0) { 10731 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10732 SmallVector<const SCEV *, 2> Qs; 10733 for (const SCEV *Op : M->operands()) 10734 if (!isa<SCEVConstant>(Op)) 10735 Qs.push_back(Op); 10736 10737 Step = SE.getMulExpr(Qs); 10738 } 10739 10740 Sizes.push_back(Step); 10741 return true; 10742 } 10743 10744 for (const SCEV *&Term : Terms) { 10745 // Normalize the terms before the next call to findArrayDimensionsRec. 10746 const SCEV *Q, *R; 10747 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10748 10749 // Bail out when GCD does not evenly divide one of the terms. 10750 if (!R->isZero()) 10751 return false; 10752 10753 Term = Q; 10754 } 10755 10756 // Remove all SCEVConstants. 10757 Terms.erase( 10758 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10759 Terms.end()); 10760 10761 if (Terms.size() > 0) 10762 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10763 return false; 10764 10765 Sizes.push_back(Step); 10766 return true; 10767 } 10768 10769 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10770 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 10771 for (const SCEV *T : Terms) 10772 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 10773 return true; 10774 return false; 10775 } 10776 10777 // Return the number of product terms in S. 10778 static inline int numberOfTerms(const SCEV *S) { 10779 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 10780 return Expr->getNumOperands(); 10781 return 1; 10782 } 10783 10784 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 10785 if (isa<SCEVConstant>(T)) 10786 return nullptr; 10787 10788 if (isa<SCEVUnknown>(T)) 10789 return T; 10790 10791 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 10792 SmallVector<const SCEV *, 2> Factors; 10793 for (const SCEV *Op : M->operands()) 10794 if (!isa<SCEVConstant>(Op)) 10795 Factors.push_back(Op); 10796 10797 return SE.getMulExpr(Factors); 10798 } 10799 10800 return T; 10801 } 10802 10803 /// Return the size of an element read or written by Inst. 
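///
/// For example (an illustrative case, assuming a typical DataLayout in which
/// a double is 8 bytes): for a "store double %v, double* %p" this returns the
/// SCEV constant 8 in the effective (pointer-width) SCEV type.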
10804 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 10805 Type *Ty; 10806 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 10807 Ty = Store->getValueOperand()->getType(); 10808 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 10809 Ty = Load->getType(); 10810 else 10811 return nullptr; 10812 10813 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 10814 return getSizeOfExpr(ETy, Ty); 10815 } 10816 10817 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 10818 SmallVectorImpl<const SCEV *> &Sizes, 10819 const SCEV *ElementSize) { 10820 if (Terms.size() < 1 || !ElementSize) 10821 return; 10822 10823 // Early return when Terms do not contain parameters: we do not delinearize 10824 // non parametric SCEVs. 10825 if (!containsParameters(Terms)) 10826 return; 10827 10828 LLVM_DEBUG({ 10829 dbgs() << "Terms:\n"; 10830 for (const SCEV *T : Terms) 10831 dbgs() << *T << "\n"; 10832 }); 10833 10834 // Remove duplicates. 10835 array_pod_sort(Terms.begin(), Terms.end()); 10836 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 10837 10838 // Put larger terms first. 10839 llvm::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) { 10840 return numberOfTerms(LHS) > numberOfTerms(RHS); 10841 }); 10842 10843 // Try to divide all terms by the element size. If term is not divisible by 10844 // element size, proceed with the original term. 10845 for (const SCEV *&Term : Terms) { 10846 const SCEV *Q, *R; 10847 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 10848 if (!Q->isZero()) 10849 Term = Q; 10850 } 10851 10852 SmallVector<const SCEV *, 4> NewTerms; 10853 10854 // Remove constant factors. 10855 for (const SCEV *T : Terms) 10856 if (const SCEV *NewT = removeConstantFactors(*this, T)) 10857 NewTerms.push_back(NewT); 10858 10859 LLVM_DEBUG({ 10860 dbgs() << "Terms after sorting:\n"; 10861 for (const SCEV *T : NewTerms) 10862 dbgs() << *T << "\n"; 10863 }); 10864 10865 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 10866 Sizes.clear(); 10867 return; 10868 } 10869 10870 // The last element to be pushed into Sizes is the size of an element. 10871 Sizes.push_back(ElementSize); 10872 10873 LLVM_DEBUG({ 10874 dbgs() << "Sizes:\n"; 10875 for (const SCEV *S : Sizes) 10876 dbgs() << *S << "\n"; 10877 }); 10878 } 10879 10880 void ScalarEvolution::computeAccessFunctions( 10881 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 10882 SmallVectorImpl<const SCEV *> &Sizes) { 10883 // Early exit in case this SCEV is not an affine multivariate function. 10884 if (Sizes.empty()) 10885 return; 10886 10887 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 10888 if (!AR->isAffine()) 10889 return; 10890 10891 const SCEV *Res = Expr; 10892 int Last = Sizes.size() - 1; 10893 for (int i = Last; i >= 0; i--) { 10894 const SCEV *Q, *R; 10895 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 10896 10897 LLVM_DEBUG({ 10898 dbgs() << "Res: " << *Res << "\n"; 10899 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 10900 dbgs() << "Res divided by Sizes[i]:\n"; 10901 dbgs() << "Quotient: " << *Q << "\n"; 10902 dbgs() << "Remainder: " << *R << "\n"; 10903 }); 10904 10905 Res = Q; 10906 10907 // Do not record the last subscript corresponding to the size of elements in 10908 // the array. 10909 if (i == Last) { 10910 10911 // Bail out if the remainder is too complex. 
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of sub
/// expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride.  When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the outermost dimension:
/// when the array has been statically allocated, one could compute the size of
/// that dimension by dividing the overall size of the array by the sizes of
/// the known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
10991 SmallVector<const SCEV *, 4> Terms; 10992 collectParametricTerms(Expr, Terms); 10993 10994 if (Terms.empty()) 10995 return; 10996 10997 // Second step: find subscript sizes. 10998 findArrayDimensions(Terms, Sizes, ElementSize); 10999 11000 if (Sizes.empty()) 11001 return; 11002 11003 // Third step: compute the access functions for each subscript. 11004 computeAccessFunctions(Expr, Subscripts, Sizes); 11005 11006 if (Subscripts.empty()) 11007 return; 11008 11009 LLVM_DEBUG({ 11010 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11011 dbgs() << "ArrayDecl[UnknownSize]"; 11012 for (const SCEV *S : Sizes) 11013 dbgs() << "[" << *S << "]"; 11014 11015 dbgs() << "\nArrayRef"; 11016 for (const SCEV *S : Subscripts) 11017 dbgs() << "[" << *S << "]"; 11018 dbgs() << "\n"; 11019 }); 11020 } 11021 11022 //===----------------------------------------------------------------------===// 11023 // SCEVCallbackVH Class Implementation 11024 //===----------------------------------------------------------------------===// 11025 11026 void ScalarEvolution::SCEVCallbackVH::deleted() { 11027 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11028 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11029 SE->ConstantEvolutionLoopExitValue.erase(PN); 11030 SE->eraseValueFromMap(getValPtr()); 11031 // this now dangles! 11032 } 11033 11034 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11035 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11036 11037 // Forget all the expressions associated with users of the old value, 11038 // so that future queries will recompute the expressions using the new 11039 // value. 11040 Value *Old = getValPtr(); 11041 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11042 SmallPtrSet<User *, 8> Visited; 11043 while (!Worklist.empty()) { 11044 User *U = Worklist.pop_back_val(); 11045 // Deleting the Old value will cause this to dangle. Postpone 11046 // that until everything else is done. 11047 if (U == Old) 11048 continue; 11049 if (!Visited.insert(U).second) 11050 continue; 11051 if (PHINode *PN = dyn_cast<PHINode>(U)) 11052 SE->ConstantEvolutionLoopExitValue.erase(PN); 11053 SE->eraseValueFromMap(U); 11054 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11055 } 11056 // Delete the Old value. 11057 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11058 SE->ConstantEvolutionLoopExitValue.erase(PN); 11059 SE->eraseValueFromMap(Old); 11060 // this now dangles! 11061 } 11062 11063 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11064 : CallbackVH(V), SE(se) {} 11065 11066 //===----------------------------------------------------------------------===// 11067 // ScalarEvolution Class Implementation 11068 //===----------------------------------------------------------------------===// 11069 11070 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11071 AssumptionCache &AC, DominatorTree &DT, 11072 LoopInfo &LI) 11073 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11074 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11075 LoopDispositions(64), BlockDispositions(64) { 11076 // To use guards for proving predicates, we need to scan every instruction in 11077 // relevant basic blocks, and not just terminators. Doing this is a waste of 11078 // time if the IR does not actually contain any calls to 11079 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 
11080 // 11081 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11082 // to _add_ guards to the module when there weren't any before, and wants 11083 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11084 // efficient in lieu of being smart in that rather obscure case. 11085 11086 auto *GuardDecl = F.getParent()->getFunction( 11087 Intrinsic::getName(Intrinsic::experimental_guard)); 11088 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11089 } 11090 11091 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11092 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11093 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11094 ValueExprMap(std::move(Arg.ValueExprMap)), 11095 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11096 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11097 PendingMerges(std::move(Arg.PendingMerges)), 11098 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11099 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11100 PredicatedBackedgeTakenCounts( 11101 std::move(Arg.PredicatedBackedgeTakenCounts)), 11102 ConstantEvolutionLoopExitValue( 11103 std::move(Arg.ConstantEvolutionLoopExitValue)), 11104 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11105 LoopDispositions(std::move(Arg.LoopDispositions)), 11106 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11107 BlockDispositions(std::move(Arg.BlockDispositions)), 11108 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11109 SignedRanges(std::move(Arg.SignedRanges)), 11110 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11111 UniquePreds(std::move(Arg.UniquePreds)), 11112 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11113 LoopUsers(std::move(Arg.LoopUsers)), 11114 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11115 FirstUnknown(Arg.FirstUnknown) { 11116 Arg.FirstUnknown = nullptr; 11117 } 11118 11119 ScalarEvolution::~ScalarEvolution() { 11120 // Iterate through all the SCEVUnknown instances and call their 11121 // destructors, so that they release their references to their values. 11122 for (SCEVUnknown *U = FirstUnknown; U;) { 11123 SCEVUnknown *Tmp = U; 11124 U = U->Next; 11125 Tmp->~SCEVUnknown(); 11126 } 11127 FirstUnknown = nullptr; 11128 11129 ExprValueMap.clear(); 11130 ValueExprMap.clear(); 11131 HasRecMap.clear(); 11132 11133 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11134 // that a loop had multiple computable exits. 
11135 for (auto &BTCI : BackedgeTakenCounts) 11136 BTCI.second.clear(); 11137 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11138 BTCI.second.clear(); 11139 11140 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11141 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11142 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11143 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11144 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11145 } 11146 11147 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11148 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11149 } 11150 11151 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11152 const Loop *L) { 11153 // Print all inner loops first 11154 for (Loop *I : *L) 11155 PrintLoopInfo(OS, SE, I); 11156 11157 OS << "Loop "; 11158 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11159 OS << ": "; 11160 11161 SmallVector<BasicBlock *, 8> ExitBlocks; 11162 L->getExitBlocks(ExitBlocks); 11163 if (ExitBlocks.size() != 1) 11164 OS << "<multiple exits> "; 11165 11166 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11167 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 11168 } else { 11169 OS << "Unpredictable backedge-taken count. "; 11170 } 11171 11172 OS << "\n" 11173 "Loop "; 11174 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11175 OS << ": "; 11176 11177 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 11178 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 11179 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11180 OS << ", actual taken count either this or zero."; 11181 } else { 11182 OS << "Unpredictable max backedge-taken count. "; 11183 } 11184 11185 OS << "\n" 11186 "Loop "; 11187 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11188 OS << ": "; 11189 11190 SCEVUnionPredicate Pred; 11191 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11192 if (!isa<SCEVCouldNotCompute>(PBT)) { 11193 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11194 OS << " Predicates:\n"; 11195 Pred.print(OS, 4); 11196 } else { 11197 OS << "Unpredictable predicated backedge-taken count. "; 11198 } 11199 OS << "\n"; 11200 11201 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11202 OS << "Loop "; 11203 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11204 OS << ": "; 11205 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11206 } 11207 } 11208 11209 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11210 switch (LD) { 11211 case ScalarEvolution::LoopVariant: 11212 return "Variant"; 11213 case ScalarEvolution::LoopInvariant: 11214 return "Invariant"; 11215 case ScalarEvolution::LoopComputable: 11216 return "Computable"; 11217 } 11218 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11219 } 11220 11221 void ScalarEvolution::print(raw_ostream &OS) const { 11222 // ScalarEvolution's implementation of the print method is to print 11223 // out SCEV values of all instructions that are interesting. Doing 11224 // this potentially causes it to create new SCEV objects though, 11225 // which technically conflicts with the const qualifier. This isn't 11226 // observable from outside the class though, so casting away the 11227 // const isn't dangerous. 
11228 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11229 11230 OS << "Classifying expressions for: "; 11231 F.printAsOperand(OS, /*PrintType=*/false); 11232 OS << "\n"; 11233 for (Instruction &I : instructions(F)) 11234 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11235 OS << I << '\n'; 11236 OS << " --> "; 11237 const SCEV *SV = SE.getSCEV(&I); 11238 SV->print(OS); 11239 if (!isa<SCEVCouldNotCompute>(SV)) { 11240 OS << " U: "; 11241 SE.getUnsignedRange(SV).print(OS); 11242 OS << " S: "; 11243 SE.getSignedRange(SV).print(OS); 11244 } 11245 11246 const Loop *L = LI.getLoopFor(I.getParent()); 11247 11248 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11249 if (AtUse != SV) { 11250 OS << " --> "; 11251 AtUse->print(OS); 11252 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11253 OS << " U: "; 11254 SE.getUnsignedRange(AtUse).print(OS); 11255 OS << " S: "; 11256 SE.getSignedRange(AtUse).print(OS); 11257 } 11258 } 11259 11260 if (L) { 11261 OS << "\t\t" "Exits: "; 11262 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11263 if (!SE.isLoopInvariant(ExitValue, L)) { 11264 OS << "<<Unknown>>"; 11265 } else { 11266 OS << *ExitValue; 11267 } 11268 11269 bool First = true; 11270 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11271 if (First) { 11272 OS << "\t\t" "LoopDispositions: { "; 11273 First = false; 11274 } else { 11275 OS << ", "; 11276 } 11277 11278 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11279 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11280 } 11281 11282 for (auto *InnerL : depth_first(L)) { 11283 if (InnerL == L) 11284 continue; 11285 if (First) { 11286 OS << "\t\t" "LoopDispositions: { "; 11287 First = false; 11288 } else { 11289 OS << ", "; 11290 } 11291 11292 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11293 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11294 } 11295 11296 OS << " }"; 11297 } 11298 11299 OS << "\n"; 11300 } 11301 11302 OS << "Determining loop execution counts for: "; 11303 F.printAsOperand(OS, /*PrintType=*/false); 11304 OS << "\n"; 11305 for (Loop *I : LI) 11306 PrintLoopInfo(OS, &SE, I); 11307 } 11308 11309 ScalarEvolution::LoopDisposition 11310 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11311 auto &Values = LoopDispositions[S]; 11312 for (auto &V : Values) { 11313 if (V.getPointer() == L) 11314 return V.getInt(); 11315 } 11316 Values.emplace_back(L, LoopVariant); 11317 LoopDisposition D = computeLoopDisposition(S, L); 11318 auto &Values2 = LoopDispositions[S]; 11319 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11320 if (V.getPointer() == L) { 11321 V.setInt(D); 11322 break; 11323 } 11324 } 11325 return D; 11326 } 11327 11328 ScalarEvolution::LoopDisposition 11329 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11330 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11331 case scConstant: 11332 return LoopInvariant; 11333 case scTruncate: 11334 case scZeroExtend: 11335 case scSignExtend: 11336 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11337 case scAddRecExpr: { 11338 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11339 11340 // If L is the addrec's loop, it's computable. 11341 if (AR->getLoop() == L) 11342 return LoopComputable; 11343 11344 // Add recurrences are never invariant in the function-body (null loop). 11345 if (!L) 11346 return LoopVariant; 11347 11348 // Everything that is not defined at loop entry is variant. 
11349 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11350 return LoopVariant; 11351 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11352 " dominate the contained loop's header?"); 11353 11354 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11355 if (AR->getLoop()->contains(L)) 11356 return LoopInvariant; 11357 11358 // This recurrence is variant w.r.t. L if any of its operands 11359 // are variant. 11360 for (auto *Op : AR->operands()) 11361 if (!isLoopInvariant(Op, L)) 11362 return LoopVariant; 11363 11364 // Otherwise it's loop-invariant. 11365 return LoopInvariant; 11366 } 11367 case scAddExpr: 11368 case scMulExpr: 11369 case scUMaxExpr: 11370 case scSMaxExpr: { 11371 bool HasVarying = false; 11372 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11373 LoopDisposition D = getLoopDisposition(Op, L); 11374 if (D == LoopVariant) 11375 return LoopVariant; 11376 if (D == LoopComputable) 11377 HasVarying = true; 11378 } 11379 return HasVarying ? LoopComputable : LoopInvariant; 11380 } 11381 case scUDivExpr: { 11382 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11383 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11384 if (LD == LoopVariant) 11385 return LoopVariant; 11386 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11387 if (RD == LoopVariant) 11388 return LoopVariant; 11389 return (LD == LoopInvariant && RD == LoopInvariant) ? 11390 LoopInvariant : LoopComputable; 11391 } 11392 case scUnknown: 11393 // All non-instruction values are loop invariant. All instructions are loop 11394 // invariant if they are not contained in the specified loop. 11395 // Instructions are never considered invariant in the function body 11396 // (null loop) because they are defined within the "loop". 11397 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11398 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11399 return LoopInvariant; 11400 case scCouldNotCompute: 11401 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11402 } 11403 llvm_unreachable("Unknown SCEV kind!"); 11404 } 11405 11406 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11407 return getLoopDisposition(S, L) == LoopInvariant; 11408 } 11409 11410 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11411 return getLoopDisposition(S, L) == LoopComputable; 11412 } 11413 11414 ScalarEvolution::BlockDisposition 11415 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11416 auto &Values = BlockDispositions[S]; 11417 for (auto &V : Values) { 11418 if (V.getPointer() == BB) 11419 return V.getInt(); 11420 } 11421 Values.emplace_back(BB, DoesNotDominateBlock); 11422 BlockDisposition D = computeBlockDisposition(S, BB); 11423 auto &Values2 = BlockDispositions[S]; 11424 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11425 if (V.getPointer() == BB) { 11426 V.setInt(D); 11427 break; 11428 } 11429 } 11430 return D; 11431 } 11432 11433 ScalarEvolution::BlockDisposition 11434 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11435 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11436 case scConstant: 11437 return ProperlyDominatesBlock; 11438 case scTruncate: 11439 case scZeroExtend: 11440 case scSignExtend: 11441 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11442 case scAddRecExpr: { 11443 // This uses a "dominates" query instead of "properly dominates" query 11444 // to test for proper dominance too, because the instruction which 11445 // produces the addrec's value is a PHI, and a PHI effectively properly 11446 // dominates its entire containing block. 11447 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11448 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11449 return DoesNotDominateBlock; 11450 11451 // Fall through into SCEVNAryExpr handling. 11452 LLVM_FALLTHROUGH; 11453 } 11454 case scAddExpr: 11455 case scMulExpr: 11456 case scUMaxExpr: 11457 case scSMaxExpr: { 11458 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11459 bool Proper = true; 11460 for (const SCEV *NAryOp : NAry->operands()) { 11461 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11462 if (D == DoesNotDominateBlock) 11463 return DoesNotDominateBlock; 11464 if (D == DominatesBlock) 11465 Proper = false; 11466 } 11467 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11468 } 11469 case scUDivExpr: { 11470 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11471 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11472 BlockDisposition LD = getBlockDisposition(LHS, BB); 11473 if (LD == DoesNotDominateBlock) 11474 return DoesNotDominateBlock; 11475 BlockDisposition RD = getBlockDisposition(RHS, BB); 11476 if (RD == DoesNotDominateBlock) 11477 return DoesNotDominateBlock; 11478 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
               ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}

void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV.  However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively).  This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say).  The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
11658 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 11659 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 11660 Inv.invalidate<AssumptionAnalysis>(F, PA) || 11661 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 11662 Inv.invalidate<LoopAnalysis>(F, PA); 11663 } 11664 11665 AnalysisKey ScalarEvolutionAnalysis::Key; 11666 11667 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 11668 FunctionAnalysisManager &AM) { 11669 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 11670 AM.getResult<AssumptionAnalysis>(F), 11671 AM.getResult<DominatorTreeAnalysis>(F), 11672 AM.getResult<LoopAnalysis>(F)); 11673 } 11674 11675 PreservedAnalyses 11676 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 11677 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 11678 return PreservedAnalyses::all(); 11679 } 11680 11681 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 11682 "Scalar Evolution Analysis", false, true) 11683 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 11684 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 11685 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 11686 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 11687 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 11688 "Scalar Evolution Analysis", false, true) 11689 11690 char ScalarEvolutionWrapperPass::ID = 0; 11691 11692 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 11693 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 11694 } 11695 11696 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 11697 SE.reset(new ScalarEvolution( 11698 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 11699 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 11700 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 11701 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 11702 return false; 11703 } 11704 11705 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 11706 11707 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 11708 SE->print(OS); 11709 } 11710 11711 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 11712 if (!VerifySCEV) 11713 return; 11714 11715 SE->verify(); 11716 } 11717 11718 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 11719 AU.setPreservesAll(); 11720 AU.addRequiredTransitive<AssumptionCacheTracker>(); 11721 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 11722 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 11723 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 11724 } 11725 11726 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 11727 const SCEV *RHS) { 11728 FoldingSetNodeID ID; 11729 assert(LHS->getType() == RHS->getType() && 11730 "Type mismatch between LHS and RHS"); 11731 // Unique this node based on the arguments 11732 ID.AddInteger(SCEVPredicate::P_Equal); 11733 ID.AddPointer(LHS); 11734 ID.AddPointer(RHS); 11735 void *IP = nullptr; 11736 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11737 return S; 11738 SCEVEqualPredicate *Eq = new (SCEVAllocator) 11739 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 11740 UniquePreds.InsertNode(Eq, IP); 11741 return Eq; 11742 } 11743 11744 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 11745 const SCEVAddRecExpr *AR, 11746 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 11747 FoldingSetNodeID ID; 11748 // Unique this 
node based on the arguments 11749 ID.AddInteger(SCEVPredicate::P_Wrap); 11750 ID.AddPointer(AR); 11751 ID.AddInteger(AddedFlags); 11752 void *IP = nullptr; 11753 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11754 return S; 11755 auto *OF = new (SCEVAllocator) 11756 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 11757 UniquePreds.InsertNode(OF, IP); 11758 return OF; 11759 } 11760 11761 namespace { 11762 11763 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 11764 public: 11765 11766 /// Rewrites \p S in the context of a loop L and the SCEV predication 11767 /// infrastructure. 11768 /// 11769 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 11770 /// equivalences present in \p Pred. 11771 /// 11772 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 11773 /// \p NewPreds such that the result will be an AddRecExpr. 11774 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 11775 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 11776 SCEVUnionPredicate *Pred) { 11777 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 11778 return Rewriter.visit(S); 11779 } 11780 11781 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11782 if (Pred) { 11783 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 11784 for (auto *Pred : ExprPreds) 11785 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 11786 if (IPred->getLHS() == Expr) 11787 return IPred->getRHS(); 11788 } 11789 return convertToAddRecWithPreds(Expr); 11790 } 11791 11792 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 11793 const SCEV *Operand = visit(Expr->getOperand()); 11794 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 11795 if (AR && AR->getLoop() == L && AR->isAffine()) { 11796 // This couldn't be folded because the operand didn't have the nuw 11797 // flag. Add the nusw flag as an assumption that we could make. 11798 const SCEV *Step = AR->getStepRecurrence(SE); 11799 Type *Ty = Expr->getType(); 11800 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 11801 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 11802 SE.getSignExtendExpr(Step, Ty), L, 11803 AR->getNoWrapFlags()); 11804 } 11805 return SE.getZeroExtendExpr(Operand, Expr->getType()); 11806 } 11807 11808 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 11809 const SCEV *Operand = visit(Expr->getOperand()); 11810 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 11811 if (AR && AR->getLoop() == L && AR->isAffine()) { 11812 // This couldn't be folded because the operand didn't have the nsw 11813 // flag. Add the nssw flag as an assumption that we could make. 11814 const SCEV *Step = AR->getStepRecurrence(SE); 11815 Type *Ty = Expr->getType(); 11816 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 11817 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 11818 SE.getSignExtendExpr(Step, Ty), L, 11819 AR->getNoWrapFlags()); 11820 } 11821 return SE.getSignExtendExpr(Operand, Expr->getType()); 11822 } 11823 11824 private: 11825 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 11826 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 11827 SCEVUnionPredicate *Pred) 11828 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 11829 11830 bool addOverflowAssumption(const SCEVPredicate *P) { 11831 if (!NewPreds) { 11832 // Check if we've already made this assumption. 
11833 return Pred && Pred->implies(P); 11834 } 11835 NewPreds->insert(P); 11836 return true; 11837 } 11838 11839 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 11840 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 11841 auto *A = SE.getWrapPredicate(AR, AddedFlags); 11842 return addOverflowAssumption(A); 11843 } 11844 11845 // If \p Expr represents a PHINode, we try to see if it can be represented 11846 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 11847 // to add this predicate as a runtime overflow check, we return the AddRec. 11848 // If \p Expr does not meet these conditions (is not a PHI node, or we 11849 // couldn't create an AddRec for it, or couldn't add the predicate), we just 11850 // return \p Expr. 11851 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 11852 if (!isa<PHINode>(Expr->getValue())) 11853 return Expr; 11854 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 11855 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 11856 if (!PredicatedRewrite) 11857 return Expr; 11858 for (auto *P : PredicatedRewrite->second){ 11859 // Wrap predicates from outer loops are not supported. 11860 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 11861 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 11862 if (L != AR->getLoop()) 11863 return Expr; 11864 } 11865 if (!addOverflowAssumption(P)) 11866 return Expr; 11867 } 11868 return PredicatedRewrite->first; 11869 } 11870 11871 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 11872 SCEVUnionPredicate *Pred; 11873 const Loop *L; 11874 }; 11875 11876 } // end anonymous namespace 11877 11878 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 11879 SCEVUnionPredicate &Preds) { 11880 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 11881 } 11882 11883 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 11884 const SCEV *S, const Loop *L, 11885 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 11886 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 11887 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 11888 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 11889 11890 if (!AddRec) 11891 return nullptr; 11892 11893 // Since the transformation was successful, we can now transfer the SCEV 11894 // predicates. 
11895 for (auto *P : TransformPreds) 11896 Preds.insert(P); 11897 11898 return AddRec; 11899 } 11900 11901 /// SCEV predicates 11902 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 11903 SCEVPredicateKind Kind) 11904 : FastID(ID), Kind(Kind) {} 11905 11906 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 11907 const SCEV *LHS, const SCEV *RHS) 11908 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 11909 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 11910 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 11911 } 11912 11913 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 11914 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 11915 11916 if (!Op) 11917 return false; 11918 11919 return Op->LHS == LHS && Op->RHS == RHS; 11920 } 11921 11922 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 11923 11924 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 11925 11926 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 11927 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 11928 } 11929 11930 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 11931 const SCEVAddRecExpr *AR, 11932 IncrementWrapFlags Flags) 11933 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 11934 11935 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 11936 11937 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 11938 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 11939 11940 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 11941 } 11942 11943 bool SCEVWrapPredicate::isAlwaysTrue() const { 11944 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 11945 IncrementWrapFlags IFlags = Flags; 11946 11947 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 11948 IFlags = clearFlags(IFlags, IncrementNSSW); 11949 11950 return IFlags == IncrementAnyWrap; 11951 } 11952 11953 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 11954 OS.indent(Depth) << *getExpr() << " Added Flags: "; 11955 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 11956 OS << "<nusw>"; 11957 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 11958 OS << "<nssw>"; 11959 OS << "\n"; 11960 } 11961 11962 SCEVWrapPredicate::IncrementWrapFlags 11963 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 11964 ScalarEvolution &SE) { 11965 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 11966 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 11967 11968 // We can safely transfer the NSW flag as NSSW. 11969 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 11970 ImpliedFlags = IncrementNSSW; 11971 11972 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 11973 // If the increment is positive, the SCEV NUW flag will also imply the 11974 // WrapPredicate NUSW flag. 11975 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 11976 if (Step->getValue()->getValue().isNonNegative()) 11977 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 11978 } 11979 11980 return ImpliedFlags; 11981 } 11982 11983 /// Union predicates don't get cached so create a dummy set ID for it. 
11984 SCEVUnionPredicate::SCEVUnionPredicate() 11985 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 11986 11987 bool SCEVUnionPredicate::isAlwaysTrue() const { 11988 return all_of(Preds, 11989 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 11990 } 11991 11992 ArrayRef<const SCEVPredicate *> 11993 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 11994 auto I = SCEVToPreds.find(Expr); 11995 if (I == SCEVToPreds.end()) 11996 return ArrayRef<const SCEVPredicate *>(); 11997 return I->second; 11998 } 11999 12000 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12001 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12002 return all_of(Set->Preds, 12003 [this](const SCEVPredicate *I) { return this->implies(I); }); 12004 12005 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12006 if (ScevPredsIt == SCEVToPreds.end()) 12007 return false; 12008 auto &SCEVPreds = ScevPredsIt->second; 12009 12010 return any_of(SCEVPreds, 12011 [N](const SCEVPredicate *I) { return I->implies(N); }); 12012 } 12013 12014 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12015 12016 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12017 for (auto Pred : Preds) 12018 Pred->print(OS, Depth); 12019 } 12020 12021 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12022 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12023 for (auto Pred : Set->Preds) 12024 add(Pred); 12025 return; 12026 } 12027 12028 if (implies(N)) 12029 return; 12030 12031 const SCEV *Key = N->getExpr(); 12032 assert(Key && "Only SCEVUnionPredicate doesn't have an " 12033 " associated expression!"); 12034 12035 SCEVToPreds[Key].push_back(N); 12036 Preds.push_back(N); 12037 } 12038 12039 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 12040 Loop &L) 12041 : SE(SE), L(L) {} 12042 12043 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 12044 const SCEV *Expr = SE.getSCEV(V); 12045 RewriteEntry &Entry = RewriteMap[Expr]; 12046 12047 // If we already have an entry and the version matches, return it. 12048 if (Entry.second && Generation == Entry.first) 12049 return Entry.second; 12050 12051 // We found an entry but it's stale. Rewrite the stale entry 12052 // according to the current predicate. 12053 if (Entry.second) 12054 Expr = Entry.second; 12055 12056 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 12057 Entry = {Generation, NewSCEV}; 12058 12059 return NewSCEV; 12060 } 12061 12062 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 12063 if (!BackedgeCount) { 12064 SCEVUnionPredicate BackedgePred; 12065 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 12066 addPredicate(BackedgePred); 12067 } 12068 return BackedgeCount; 12069 } 12070 12071 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 12072 if (Preds.implies(&Pred)) 12073 return; 12074 Preds.add(&Pred); 12075 updateGeneration(); 12076 } 12077 12078 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 12079 return Preds; 12080 } 12081 12082 void PredicatedScalarEvolution::updateGeneration() { 12083 // If the generation number wrapped recompute everything. 
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
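// Example (an illustrative sketch, not used by the analysis; the helper name
// is hypothetical): a vectorizer-style client can record that an induction
// must not wrap in the signed sense, then query whether signed overflow has
// been ruled out, either statically or through the recorded predicates.
static LLVM_ATTRIBUTE_UNUSED bool
exampleAssumeNoSignedWrap(PredicatedScalarEvolution &PSE, Value *V) {
  // getAsAddRec may add predicates to the union; it returns nullptr if V
  // cannot be expressed as an AddRec even under additional predicates.
  if (!PSE.getAsAddRec(V))
    return false;
  PSE.setNoOverflow(V, SCEVWrapPredicate::IncrementNSSW);
  return PSE.hasNoOverflow(V, SCEVWrapPredicate::IncrementNSSW);
}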
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. This is not always straightforward to recognize,
// because A and B may themselves fold (e.g., if A is X / 2 and B is 4,
// A / B folds to X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
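// Example (an illustrative sketch, not used by the analysis; the helper name
// is hypothetical): for a divisor that is neither 1 nor a power of two (those
// fold to simpler forms), getURemExpr expands A urem B into the
// A + (-1 * (A /u B) * B) shape the matcher above recognizes, so rebuilding
// that expansion by hand through the public folders yields the same uniqued
// SCEV. A and B are assumed to have the same integer type.
static LLVM_ATTRIBUTE_UNUSED bool
exampleURemExpansionFoldsBack(ScalarEvolution &SE, const SCEV *A,
                              const SCEV *B) {
  // A + (-((A /u B) * B)), i.e. the subtraction written as an add of a
  // negated multiply.
  const SCEV *Expansion = SE.getAddExpr(
      A, SE.getNegativeSCEV(SE.getMulExpr(SE.getUDivExpr(A, B), B)));
  return Expansion == SE.getURemExpr(A, B);
}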