1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the implementation of the scalar evolution analysis 11 // engine, which is used primarily to analyze expressions involving induction 12 // variables in loops. 13 // 14 // There are several aspects to this library. First is the representation of 15 // scalar expressions, which are represented as subclasses of the SCEV class. 16 // These classes are used to represent certain types of subexpressions that we 17 // can handle. We only create one SCEV of a particular shape, so 18 // pointer-comparisons for equality are legal. 19 // 20 // One important aspect of the SCEV objects is that they are never cyclic, even 21 // if there is a cycle in the dataflow for an expression (ie, a PHI node). If 22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial 23 // recurrence) then we represent it directly as a recurrence node, otherwise we 24 // represent it as a SCEVUnknown node. 25 // 26 // In addition to being able to represent expressions of various types, we also 27 // have folders that are used to build the *canonical* representation for a 28 // particular expression. These folders are capable of using a variety of 29 // rewrite rules to simplify the expressions. 30 // 31 // Once the folders are defined, we can implement the more interesting 32 // higher-level code, such as the code that recognizes PHI nodes of various 33 // types, computes the execution count of a loop, etc. 34 // 35 // TODO: We should use these routines and value representations to implement 36 // dependence analysis! 37 // 38 //===----------------------------------------------------------------------===// 39 // 40 // There are several good references for the techniques used in this analysis. 41 // 42 // Chains of recurrences -- a method to expedite the evaluation 43 // of closed-form functions 44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima 45 // 46 // On computational properties of chains of recurrences 47 // Eugene V. Zima 48 // 49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization 50 // Robert A. van Engelen 51 // 52 // Efficient Symbolic Analysis for Optimizing Compilers 53 // Robert A. van Engelen 54 // 55 // Using the chains of recurrences algebra for data dependence testing and 56 // induction variable substitution 57 // MS Thesis, Johnie Birch 58 // 59 //===----------------------------------------------------------------------===// 60 61 #include "llvm/Analysis/ScalarEvolution.h" 62 #include "llvm/ADT/APInt.h" 63 #include "llvm/ADT/ArrayRef.h" 64 #include "llvm/ADT/DenseMap.h" 65 #include "llvm/ADT/DepthFirstIterator.h" 66 #include "llvm/ADT/EquivalenceClasses.h" 67 #include "llvm/ADT/FoldingSet.h" 68 #include "llvm/ADT/None.h" 69 #include "llvm/ADT/Optional.h" 70 #include "llvm/ADT/STLExtras.h" 71 #include "llvm/ADT/ScopeExit.h" 72 #include "llvm/ADT/Sequence.h" 73 #include "llvm/ADT/SetVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallSet.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/Statistic.h" 78 #include "llvm/ADT/StringRef.h" 79 #include "llvm/Analysis/AssumptionCache.h" 80 #include "llvm/Analysis/ConstantFolding.h" 81 #include "llvm/Analysis/InstructionSimplify.h" 82 #include "llvm/Analysis/LoopInfo.h" 83 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 84 #include "llvm/Analysis/TargetLibraryInfo.h" 85 #include "llvm/Analysis/ValueTracking.h" 86 #include "llvm/Config/llvm-config.h" 87 #include "llvm/IR/Argument.h" 88 #include "llvm/IR/BasicBlock.h" 89 #include "llvm/IR/CFG.h" 90 #include "llvm/IR/CallSite.h" 91 #include "llvm/IR/Constant.h" 92 #include "llvm/IR/ConstantRange.h" 93 #include "llvm/IR/Constants.h" 94 #include "llvm/IR/DataLayout.h" 95 #include "llvm/IR/DerivedTypes.h" 96 #include "llvm/IR/Dominators.h" 97 #include "llvm/IR/Function.h" 98 #include "llvm/IR/GlobalAlias.h" 99 #include "llvm/IR/GlobalValue.h" 100 #include "llvm/IR/GlobalVariable.h" 101 #include "llvm/IR/InstIterator.h" 102 #include "llvm/IR/InstrTypes.h" 103 #include "llvm/IR/Instruction.h" 104 #include "llvm/IR/Instructions.h" 105 #include "llvm/IR/IntrinsicInst.h" 106 #include "llvm/IR/Intrinsics.h" 107 #include "llvm/IR/LLVMContext.h" 108 #include "llvm/IR/Metadata.h" 109 #include "llvm/IR/Operator.h" 110 #include "llvm/IR/PatternMatch.h" 111 #include "llvm/IR/Type.h" 112 #include "llvm/IR/Use.h" 113 #include "llvm/IR/User.h" 114 #include "llvm/IR/Value.h" 115 #include "llvm/Pass.h" 116 #include "llvm/Support/Casting.h" 117 #include "llvm/Support/CommandLine.h" 118 #include "llvm/Support/Compiler.h" 119 #include "llvm/Support/Debug.h" 120 #include "llvm/Support/ErrorHandling.h" 121 #include "llvm/Support/KnownBits.h" 122 #include "llvm/Support/SaveAndRestore.h" 123 #include "llvm/Support/raw_ostream.h" 124 #include <algorithm> 125 #include <cassert> 126 #include <climits> 127 #include <cstddef> 128 #include <cstdint> 129 #include <cstdlib> 130 #include <map> 131 #include <memory> 132 #include <tuple> 133 #include <utility> 134 #include <vector> 135 136 using namespace llvm; 137 138 #define DEBUG_TYPE "scalar-evolution" 139 140 STATISTIC(NumArrayLenItCounts, 141 "Number of trip counts computed with array length"); 142 STATISTIC(NumTripCountsComputed, 143 "Number of loops with predictable loop counts"); 144 STATISTIC(NumTripCountsNotComputed, 145 "Number of loops without predictable loop counts"); 146 STATISTIC(NumBruteForceTripCountsComputed, 147 "Number of loops with trip counts computed by force"); 148 149 static cl::opt<unsigned> 150 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, 151 cl::desc("Maximum number of iterations SCEV will " 152 "symbolically execute a constant " 153 "derived loop"), 154 cl::init(100)); 155 156 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 157 static cl::opt<bool> VerifySCEV( 158 "verify-scev", cl::Hidden, 159 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 160 static cl::opt<bool> 161 VerifySCEVMap("verify-scev-maps", cl::Hidden, 162 cl::desc("Verify no dangling value in ScalarEvolution's " 163 "ExprValueMap (slow)")); 164 165 static cl::opt<unsigned> MulOpsInlineThreshold( 166 "scev-mulops-inline-threshold", cl::Hidden, 167 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 168 cl::init(32)); 169 170 static cl::opt<unsigned> AddOpsInlineThreshold( 171 "scev-addops-inline-threshold", cl::Hidden, 172 cl::desc("Threshold for inlining addition operands into a SCEV"), 173 cl::init(500)); 174 175 static cl::opt<unsigned> MaxSCEVCompareDepth( 176 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 177 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 178 cl::init(32)); 179 180 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 181 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 182 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 183 cl::init(2)); 184 185 static cl::opt<unsigned> MaxValueCompareDepth( 186 "scalar-evolution-max-value-compare-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive value complexity comparisons"), 188 cl::init(2)); 189 190 static cl::opt<unsigned> 191 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive arithmetics"), 193 cl::init(32)); 194 195 static cl::opt<unsigned> MaxConstantEvolvingDepth( 196 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 198 199 static cl::opt<unsigned> 200 MaxExtDepth("scalar-evolution-max-ext-depth", cl::Hidden, 201 cl::desc("Maximum depth of recursive SExt/ZExt"), 202 cl::init(8)); 203 204 static cl::opt<unsigned> 205 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 206 cl::desc("Max coefficients in AddRec during evolving"), 207 cl::init(8)); 208 209 //===----------------------------------------------------------------------===// 210 // SCEV class definitions 211 //===----------------------------------------------------------------------===// 212 213 //===----------------------------------------------------------------------===// 214 // Implementation of the SCEV class. 215 // 216 217 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 218 LLVM_DUMP_METHOD void SCEV::dump() const { 219 print(dbgs()); 220 dbgs() << '\n'; 221 } 222 #endif 223 224 void SCEV::print(raw_ostream &OS) const { 225 switch (static_cast<SCEVTypes>(getSCEVType())) { 226 case scConstant: 227 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 228 return; 229 case scTruncate: { 230 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 231 const SCEV *Op = Trunc->getOperand(); 232 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 233 << *Trunc->getType() << ")"; 234 return; 235 } 236 case scZeroExtend: { 237 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 238 const SCEV *Op = ZExt->getOperand(); 239 OS << "(zext " << *Op->getType() << " " << *Op << " to " 240 << *ZExt->getType() << ")"; 241 return; 242 } 243 case scSignExtend: { 244 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 245 const SCEV *Op = SExt->getOperand(); 246 OS << "(sext " << *Op->getType() << " " << *Op << " to " 247 << *SExt->getType() << ")"; 248 return; 249 } 250 case scAddRecExpr: { 251 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 252 OS << "{" << *AR->getOperand(0); 253 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 254 OS << ",+," << *AR->getOperand(i); 255 OS << "}<"; 256 if (AR->hasNoUnsignedWrap()) 257 OS << "nuw><"; 258 if (AR->hasNoSignedWrap()) 259 OS << "nsw><"; 260 if (AR->hasNoSelfWrap() && 261 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 262 OS << "nw><"; 263 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 264 OS << ">"; 265 return; 266 } 267 case scAddExpr: 268 case scMulExpr: 269 case scUMaxExpr: 270 case scSMaxExpr: { 271 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 272 const char *OpStr = nullptr; 273 switch (NAry->getSCEVType()) { 274 case scAddExpr: OpStr = " + "; break; 275 case scMulExpr: OpStr = " * "; break; 276 case scUMaxExpr: OpStr = " umax "; break; 277 case scSMaxExpr: OpStr = " smax "; break; 278 } 279 OS << "("; 280 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end(); 281 I != E; ++I) { 282 OS << **I; 283 if (std::next(I) != E) 284 OS << OpStr; 285 } 286 OS << ")"; 287 switch (NAry->getSCEVType()) { 288 case scAddExpr: 289 case scMulExpr: 290 if (NAry->hasNoUnsignedWrap()) 291 OS << "<nuw>"; 292 if (NAry->hasNoSignedWrap()) 293 OS << "<nsw>"; 294 } 295 return; 296 } 297 case scUDivExpr: { 298 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this); 299 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")"; 300 return; 301 } 302 case scUnknown: { 303 const SCEVUnknown *U = cast<SCEVUnknown>(this); 304 Type *AllocTy; 305 if (U->isSizeOf(AllocTy)) { 306 OS << "sizeof(" << *AllocTy << ")"; 307 return; 308 } 309 if (U->isAlignOf(AllocTy)) { 310 OS << "alignof(" << *AllocTy << ")"; 311 return; 312 } 313 314 Type *CTy; 315 Constant *FieldNo; 316 if (U->isOffsetOf(CTy, FieldNo)) { 317 OS << "offsetof(" << *CTy << ", "; 318 FieldNo->printAsOperand(OS, false); 319 OS << ")"; 320 return; 321 } 322 323 // Otherwise just print it normally. 324 U->getValue()->printAsOperand(OS, false); 325 return; 326 } 327 case scCouldNotCompute: 328 OS << "***COULDNOTCOMPUTE***"; 329 return; 330 } 331 llvm_unreachable("Unknown SCEV kind!"); 332 } 333 334 Type *SCEV::getType() const { 335 switch (static_cast<SCEVTypes>(getSCEVType())) { 336 case scConstant: 337 return cast<SCEVConstant>(this)->getType(); 338 case scTruncate: 339 case scZeroExtend: 340 case scSignExtend: 341 return cast<SCEVCastExpr>(this)->getType(); 342 case scAddRecExpr: 343 case scMulExpr: 344 case scUMaxExpr: 345 case scSMaxExpr: 346 return cast<SCEVNAryExpr>(this)->getType(); 347 case scAddExpr: 348 return cast<SCEVAddExpr>(this)->getType(); 349 case scUDivExpr: 350 return cast<SCEVUDivExpr>(this)->getType(); 351 case scUnknown: 352 return cast<SCEVUnknown>(this)->getType(); 353 case scCouldNotCompute: 354 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 355 } 356 llvm_unreachable("Unknown SCEV kind!"); 357 } 358 359 bool SCEV::isZero() const { 360 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 361 return SC->getValue()->isZero(); 362 return false; 363 } 364 365 bool SCEV::isOne() const { 366 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 367 return SC->getValue()->isOne(); 368 return false; 369 } 370 371 bool SCEV::isAllOnesValue() const { 372 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 373 return SC->getValue()->isMinusOne(); 374 return false; 375 } 376 377 bool SCEV::isNonConstantNegative() const { 378 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this); 379 if (!Mul) return false; 380 381 // If there is a constant factor, it will be first. 382 const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0)); 383 if (!SC) return false; 384 385 // Return true if the value is negative, this matches things like (-42 * V). 386 return SC->getAPInt().isNegative(); 387 } 388 389 SCEVCouldNotCompute::SCEVCouldNotCompute() : 390 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {} 391 392 bool SCEVCouldNotCompute::classof(const SCEV *S) { 393 return S->getSCEVType() == scCouldNotCompute; 394 } 395 396 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { 397 FoldingSetNodeID ID; 398 ID.AddInteger(scConstant); 399 ID.AddPointer(V); 400 void *IP = nullptr; 401 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 402 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); 403 UniqueSCEVs.InsertNode(S, IP); 404 return S; 405 } 406 407 const SCEV *ScalarEvolution::getConstant(const APInt &Val) { 408 return getConstant(ConstantInt::get(getContext(), Val)); 409 } 410 411 const SCEV * 412 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { 413 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 414 return getConstant(ConstantInt::get(ITy, V, isSigned)); 415 } 416 417 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, 418 unsigned SCEVTy, const SCEV *op, Type *ty) 419 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {} 420 421 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, 422 const SCEV *op, Type *ty) 423 : SCEVCastExpr(ID, scTruncate, op, ty) { 424 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 425 "Cannot truncate non-integer value!"); 426 } 427 428 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, 429 const SCEV *op, Type *ty) 430 : SCEVCastExpr(ID, scZeroExtend, op, ty) { 431 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 432 "Cannot zero extend non-integer value!"); 433 } 434 435 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, 436 const SCEV *op, Type *ty) 437 : SCEVCastExpr(ID, scSignExtend, op, ty) { 438 assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 439 "Cannot sign extend non-integer value!"); 440 } 441 442 void SCEVUnknown::deleted() { 443 // Clear this SCEVUnknown from various maps. 444 SE->forgetMemoizedResults(this); 445 446 // Remove this SCEVUnknown from the uniquing map. 447 SE->UniqueSCEVs.RemoveNode(this); 448 449 // Release the value. 450 setValPtr(nullptr); 451 } 452 453 void SCEVUnknown::allUsesReplacedWith(Value *New) { 454 // Remove this SCEVUnknown from the uniquing map. 455 SE->UniqueSCEVs.RemoveNode(this); 456 457 // Update this SCEVUnknown to point to the new value. This is needed 458 // because there may still be outstanding SCEVs which still point to 459 // this SCEVUnknown. 460 setValPtr(New); 461 } 462 463 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { 464 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 465 if (VCE->getOpcode() == Instruction::PtrToInt) 466 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 467 if (CE->getOpcode() == Instruction::GetElementPtr && 468 CE->getOperand(0)->isNullValue() && 469 CE->getNumOperands() == 2) 470 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) 471 if (CI->isOne()) { 472 AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) 473 ->getElementType(); 474 return true; 475 } 476 477 return false; 478 } 479 480 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { 481 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 482 if (VCE->getOpcode() == Instruction::PtrToInt) 483 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 484 if (CE->getOpcode() == Instruction::GetElementPtr && 485 CE->getOperand(0)->isNullValue()) { 486 Type *Ty = 487 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 488 if (StructType *STy = dyn_cast<StructType>(Ty)) 489 if (!STy->isPacked() && 490 CE->getNumOperands() == 3 && 491 CE->getOperand(1)->isNullValue()) { 492 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) 493 if (CI->isOne() && 494 STy->getNumElements() == 2 && 495 STy->getElementType(0)->isIntegerTy(1)) { 496 AllocTy = STy->getElementType(1); 497 return true; 498 } 499 } 500 } 501 502 return false; 503 } 504 505 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { 506 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 507 if (VCE->getOpcode() == Instruction::PtrToInt) 508 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 509 if (CE->getOpcode() == Instruction::GetElementPtr && 510 CE->getNumOperands() == 3 && 511 CE->getOperand(0)->isNullValue() && 512 CE->getOperand(1)->isNullValue()) { 513 Type *Ty = 514 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 515 // Ignore vector types here so that ScalarEvolutionExpander doesn't 516 // emit getelementptrs that index into vectors. 517 if (Ty->isStructTy() || Ty->isArrayTy()) { 518 CTy = Ty; 519 FieldNo = CE->getOperand(2); 520 return true; 521 } 522 } 523 524 return false; 525 } 526 527 //===----------------------------------------------------------------------===// 528 // SCEV Utilities 529 //===----------------------------------------------------------------------===// 530 531 /// Compare the two values \p LV and \p RV in terms of their "complexity" where 532 /// "complexity" is a partial (and somewhat ad-hoc) relation used to order 533 /// operands in SCEV expressions. \p EqCache is a set of pairs of values that 534 /// have been previously deemed to be "equally complex" by this routine. It is 535 /// intended to avoid exponential time complexity in cases like: 536 /// 537 /// %a = f(%x, %y) 538 /// %b = f(%a, %a) 539 /// %c = f(%b, %b) 540 /// 541 /// %d = f(%x, %y) 542 /// %e = f(%d, %d) 543 /// %f = f(%e, %e) 544 /// 545 /// CompareValueComplexity(%f, %c) 546 /// 547 /// Since we do not continue running this routine on expression trees once we 548 /// have seen unequal values, there is no need to track them in the cache. 549 static int 550 CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue, 551 const LoopInfo *const LI, Value *LV, Value *RV, 552 unsigned Depth) { 553 if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV)) 554 return 0; 555 556 // Order pointer values after integer values. This helps SCEVExpander form 557 // GEPs. 558 bool LIsPointer = LV->getType()->isPointerTy(), 559 RIsPointer = RV->getType()->isPointerTy(); 560 if (LIsPointer != RIsPointer) 561 return (int)LIsPointer - (int)RIsPointer; 562 563 // Compare getValueID values. 564 unsigned LID = LV->getValueID(), RID = RV->getValueID(); 565 if (LID != RID) 566 return (int)LID - (int)RID; 567 568 // Sort arguments by their position. 569 if (const auto *LA = dyn_cast<Argument>(LV)) { 570 const auto *RA = cast<Argument>(RV); 571 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); 572 return (int)LArgNo - (int)RArgNo; 573 } 574 575 if (const auto *LGV = dyn_cast<GlobalValue>(LV)) { 576 const auto *RGV = cast<GlobalValue>(RV); 577 578 const auto IsGVNameSemantic = [&](const GlobalValue *GV) { 579 auto LT = GV->getLinkage(); 580 return !(GlobalValue::isPrivateLinkage(LT) || 581 GlobalValue::isInternalLinkage(LT)); 582 }; 583 584 // Use the names to distinguish the two values, but only if the 585 // names are semantically important. 586 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV)) 587 return LGV->getName().compare(RGV->getName()); 588 } 589 590 // For instructions, compare their loop depth, and their operand count. This 591 // is pretty loose. 592 if (const auto *LInst = dyn_cast<Instruction>(LV)) { 593 const auto *RInst = cast<Instruction>(RV); 594 595 // Compare loop depths. 596 const BasicBlock *LParent = LInst->getParent(), 597 *RParent = RInst->getParent(); 598 if (LParent != RParent) { 599 unsigned LDepth = LI->getLoopDepth(LParent), 600 RDepth = LI->getLoopDepth(RParent); 601 if (LDepth != RDepth) 602 return (int)LDepth - (int)RDepth; 603 } 604 605 // Compare the number of operands. 606 unsigned LNumOps = LInst->getNumOperands(), 607 RNumOps = RInst->getNumOperands(); 608 if (LNumOps != RNumOps) 609 return (int)LNumOps - (int)RNumOps; 610 611 for (unsigned Idx : seq(0u, LNumOps)) { 612 int Result = 613 CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx), 614 RInst->getOperand(Idx), Depth + 1); 615 if (Result != 0) 616 return Result; 617 } 618 } 619 620 EqCacheValue.unionSets(LV, RV); 621 return 0; 622 } 623 624 // Return negative, zero, or positive, if LHS is less than, equal to, or greater 625 // than RHS, respectively. A three-way result allows recursive comparisons to be 626 // more efficient. 627 static int CompareSCEVComplexity( 628 EquivalenceClasses<const SCEV *> &EqCacheSCEV, 629 EquivalenceClasses<const Value *> &EqCacheValue, 630 const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, 631 DominatorTree &DT, unsigned Depth = 0) { 632 // Fast-path: SCEVs are uniqued so we can do a quick equality check. 633 if (LHS == RHS) 634 return 0; 635 636 // Primarily, sort the SCEVs by their getSCEVType(). 637 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); 638 if (LType != RType) 639 return (int)LType - (int)RType; 640 641 if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS)) 642 return 0; 643 // Aside from the getSCEVType() ordering, the particular ordering 644 // isn't very important except that it's beneficial to be consistent, 645 // so that (a + b) and (b + a) don't end up as different expressions. 646 switch (static_cast<SCEVTypes>(LType)) { 647 case scUnknown: { 648 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); 649 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); 650 651 int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(), 652 RU->getValue(), Depth + 1); 653 if (X == 0) 654 EqCacheSCEV.unionSets(LHS, RHS); 655 return X; 656 } 657 658 case scConstant: { 659 const SCEVConstant *LC = cast<SCEVConstant>(LHS); 660 const SCEVConstant *RC = cast<SCEVConstant>(RHS); 661 662 // Compare constant values. 663 const APInt &LA = LC->getAPInt(); 664 const APInt &RA = RC->getAPInt(); 665 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); 666 if (LBitWidth != RBitWidth) 667 return (int)LBitWidth - (int)RBitWidth; 668 return LA.ult(RA) ? -1 : 1; 669 } 670 671 case scAddRecExpr: { 672 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); 673 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); 674 675 // There is always a dominance between two recs that are used by one SCEV, 676 // so we can safely sort recs by loop header dominance. We require such 677 // order in getAddExpr. 678 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); 679 if (LLoop != RLoop) { 680 const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); 681 assert(LHead != RHead && "Two loops share the same header?"); 682 if (DT.dominates(LHead, RHead)) 683 return 1; 684 else 685 assert(DT.dominates(RHead, LHead) && 686 "No dominance between recurrences used by one SCEV?"); 687 return -1; 688 } 689 690 // Addrec complexity grows with operand count. 691 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); 692 if (LNumOps != RNumOps) 693 return (int)LNumOps - (int)RNumOps; 694 695 // Lexicographically compare. 696 for (unsigned i = 0; i != LNumOps; ++i) { 697 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 698 LA->getOperand(i), RA->getOperand(i), DT, 699 Depth + 1); 700 if (X != 0) 701 return X; 702 } 703 EqCacheSCEV.unionSets(LHS, RHS); 704 return 0; 705 } 706 707 case scAddExpr: 708 case scMulExpr: 709 case scSMaxExpr: 710 case scUMaxExpr: { 711 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); 712 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); 713 714 // Lexicographically compare n-ary expressions. 715 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); 716 if (LNumOps != RNumOps) 717 return (int)LNumOps - (int)RNumOps; 718 719 for (unsigned i = 0; i != LNumOps; ++i) { 720 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 721 LC->getOperand(i), RC->getOperand(i), DT, 722 Depth + 1); 723 if (X != 0) 724 return X; 725 } 726 EqCacheSCEV.unionSets(LHS, RHS); 727 return 0; 728 } 729 730 case scUDivExpr: { 731 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); 732 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); 733 734 // Lexicographically compare udiv expressions. 735 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(), 736 RC->getLHS(), DT, Depth + 1); 737 if (X != 0) 738 return X; 739 X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(), 740 RC->getRHS(), DT, Depth + 1); 741 if (X == 0) 742 EqCacheSCEV.unionSets(LHS, RHS); 743 return X; 744 } 745 746 case scTruncate: 747 case scZeroExtend: 748 case scSignExtend: { 749 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); 750 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); 751 752 // Compare cast expressions by operand. 753 int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 754 LC->getOperand(), RC->getOperand(), DT, 755 Depth + 1); 756 if (X == 0) 757 EqCacheSCEV.unionSets(LHS, RHS); 758 return X; 759 } 760 761 case scCouldNotCompute: 762 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 763 } 764 llvm_unreachable("Unknown SCEV kind!"); 765 } 766 767 /// Given a list of SCEV objects, order them by their complexity, and group 768 /// objects of the same complexity together by value. When this routine is 769 /// finished, we know that any duplicates in the vector are consecutive and that 770 /// complexity is monotonically increasing. 771 /// 772 /// Note that we go take special precautions to ensure that we get deterministic 773 /// results from this routine. In other words, we don't want the results of 774 /// this to depend on where the addresses of various SCEV objects happened to 775 /// land in memory. 776 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, 777 LoopInfo *LI, DominatorTree &DT) { 778 if (Ops.size() < 2) return; // Noop 779 780 EquivalenceClasses<const SCEV *> EqCacheSCEV; 781 EquivalenceClasses<const Value *> EqCacheValue; 782 if (Ops.size() == 2) { 783 // This is the common case, which also happens to be trivially simple. 784 // Special case it. 785 const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; 786 if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0) 787 std::swap(LHS, RHS); 788 return; 789 } 790 791 // Do the rough sort by complexity. 792 std::stable_sort(Ops.begin(), Ops.end(), 793 [&](const SCEV *LHS, const SCEV *RHS) { 794 return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, 795 LHS, RHS, DT) < 0; 796 }); 797 798 // Now that we are sorted by complexity, group elements of the same 799 // complexity. Note that this is, at worst, N^2, but the vector is likely to 800 // be extremely short in practice. Note that we take this approach because we 801 // do not want to depend on the addresses of the objects we are grouping. 802 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) { 803 const SCEV *S = Ops[i]; 804 unsigned Complexity = S->getSCEVType(); 805 806 // If there are any objects of the same complexity and same value as this 807 // one, group them. 808 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) { 809 if (Ops[j] == S) { // Found a duplicate. 810 // Move it to immediately after i'th element. 811 std::swap(Ops[i+1], Ops[j]); 812 ++i; // no need to rescan it. 813 if (i == e-2) return; // Done! 814 } 815 } 816 } 817 } 818 819 // Returns the size of the SCEV S. 820 static inline int sizeOfSCEV(const SCEV *S) { 821 struct FindSCEVSize { 822 int Size = 0; 823 824 FindSCEVSize() = default; 825 826 bool follow(const SCEV *S) { 827 ++Size; 828 // Keep looking at all operands of S. 829 return true; 830 } 831 832 bool isDone() const { 833 return false; 834 } 835 }; 836 837 FindSCEVSize F; 838 SCEVTraversal<FindSCEVSize> ST(F); 839 ST.visitAll(S); 840 return F.Size; 841 } 842 843 namespace { 844 845 struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> { 846 public: 847 // Computes the Quotient and Remainder of the division of Numerator by 848 // Denominator. 849 static void divide(ScalarEvolution &SE, const SCEV *Numerator, 850 const SCEV *Denominator, const SCEV **Quotient, 851 const SCEV **Remainder) { 852 assert(Numerator && Denominator && "Uninitialized SCEV"); 853 854 SCEVDivision D(SE, Numerator, Denominator); 855 856 // Check for the trivial case here to avoid having to check for it in the 857 // rest of the code. 858 if (Numerator == Denominator) { 859 *Quotient = D.One; 860 *Remainder = D.Zero; 861 return; 862 } 863 864 if (Numerator->isZero()) { 865 *Quotient = D.Zero; 866 *Remainder = D.Zero; 867 return; 868 } 869 870 // A simple case when N/1. The quotient is N. 871 if (Denominator->isOne()) { 872 *Quotient = Numerator; 873 *Remainder = D.Zero; 874 return; 875 } 876 877 // Split the Denominator when it is a product. 878 if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) { 879 const SCEV *Q, *R; 880 *Quotient = Numerator; 881 for (const SCEV *Op : T->operands()) { 882 divide(SE, *Quotient, Op, &Q, &R); 883 *Quotient = Q; 884 885 // Bail out when the Numerator is not divisible by one of the terms of 886 // the Denominator. 887 if (!R->isZero()) { 888 *Quotient = D.Zero; 889 *Remainder = Numerator; 890 return; 891 } 892 } 893 *Remainder = D.Zero; 894 return; 895 } 896 897 D.visit(Numerator); 898 *Quotient = D.Quotient; 899 *Remainder = D.Remainder; 900 } 901 902 // Except in the trivial case described above, we do not know how to divide 903 // Expr by Denominator for the following functions with empty implementation. 904 void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {} 905 void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {} 906 void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {} 907 void visitUDivExpr(const SCEVUDivExpr *Numerator) {} 908 void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {} 909 void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {} 910 void visitUnknown(const SCEVUnknown *Numerator) {} 911 void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {} 912 913 void visitConstant(const SCEVConstant *Numerator) { 914 if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) { 915 APInt NumeratorVal = Numerator->getAPInt(); 916 APInt DenominatorVal = D->getAPInt(); 917 uint32_t NumeratorBW = NumeratorVal.getBitWidth(); 918 uint32_t DenominatorBW = DenominatorVal.getBitWidth(); 919 920 if (NumeratorBW > DenominatorBW) 921 DenominatorVal = DenominatorVal.sext(NumeratorBW); 922 else if (NumeratorBW < DenominatorBW) 923 NumeratorVal = NumeratorVal.sext(DenominatorBW); 924 925 APInt QuotientVal(NumeratorVal.getBitWidth(), 0); 926 APInt RemainderVal(NumeratorVal.getBitWidth(), 0); 927 APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal); 928 Quotient = SE.getConstant(QuotientVal); 929 Remainder = SE.getConstant(RemainderVal); 930 return; 931 } 932 } 933 934 void visitAddRecExpr(const SCEVAddRecExpr *Numerator) { 935 const SCEV *StartQ, *StartR, *StepQ, *StepR; 936 if (!Numerator->isAffine()) 937 return cannotDivide(Numerator); 938 divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR); 939 divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR); 940 // Bail out if the types do not match. 941 Type *Ty = Denominator->getType(); 942 if (Ty != StartQ->getType() || Ty != StartR->getType() || 943 Ty != StepQ->getType() || Ty != StepR->getType()) 944 return cannotDivide(Numerator); 945 Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(), 946 Numerator->getNoWrapFlags()); 947 Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(), 948 Numerator->getNoWrapFlags()); 949 } 950 951 void visitAddExpr(const SCEVAddExpr *Numerator) { 952 SmallVector<const SCEV *, 2> Qs, Rs; 953 Type *Ty = Denominator->getType(); 954 955 for (const SCEV *Op : Numerator->operands()) { 956 const SCEV *Q, *R; 957 divide(SE, Op, Denominator, &Q, &R); 958 959 // Bail out if types do not match. 960 if (Ty != Q->getType() || Ty != R->getType()) 961 return cannotDivide(Numerator); 962 963 Qs.push_back(Q); 964 Rs.push_back(R); 965 } 966 967 if (Qs.size() == 1) { 968 Quotient = Qs[0]; 969 Remainder = Rs[0]; 970 return; 971 } 972 973 Quotient = SE.getAddExpr(Qs); 974 Remainder = SE.getAddExpr(Rs); 975 } 976 977 void visitMulExpr(const SCEVMulExpr *Numerator) { 978 SmallVector<const SCEV *, 2> Qs; 979 Type *Ty = Denominator->getType(); 980 981 bool FoundDenominatorTerm = false; 982 for (const SCEV *Op : Numerator->operands()) { 983 // Bail out if types do not match. 984 if (Ty != Op->getType()) 985 return cannotDivide(Numerator); 986 987 if (FoundDenominatorTerm) { 988 Qs.push_back(Op); 989 continue; 990 } 991 992 // Check whether Denominator divides one of the product operands. 993 const SCEV *Q, *R; 994 divide(SE, Op, Denominator, &Q, &R); 995 if (!R->isZero()) { 996 Qs.push_back(Op); 997 continue; 998 } 999 1000 // Bail out if types do not match. 1001 if (Ty != Q->getType()) 1002 return cannotDivide(Numerator); 1003 1004 FoundDenominatorTerm = true; 1005 Qs.push_back(Q); 1006 } 1007 1008 if (FoundDenominatorTerm) { 1009 Remainder = Zero; 1010 if (Qs.size() == 1) 1011 Quotient = Qs[0]; 1012 else 1013 Quotient = SE.getMulExpr(Qs); 1014 return; 1015 } 1016 1017 if (!isa<SCEVUnknown>(Denominator)) 1018 return cannotDivide(Numerator); 1019 1020 // The Remainder is obtained by replacing Denominator by 0 in Numerator. 1021 ValueToValueMap RewriteMap; 1022 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] = 1023 cast<SCEVConstant>(Zero)->getValue(); 1024 Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true); 1025 1026 if (Remainder->isZero()) { 1027 // The Quotient is obtained by replacing Denominator by 1 in Numerator. 1028 RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] = 1029 cast<SCEVConstant>(One)->getValue(); 1030 Quotient = 1031 SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true); 1032 return; 1033 } 1034 1035 // Quotient is (Numerator - Remainder) divided by Denominator. 1036 const SCEV *Q, *R; 1037 const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder); 1038 // This SCEV does not seem to simplify: fail the division here. 1039 if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator)) 1040 return cannotDivide(Numerator); 1041 divide(SE, Diff, Denominator, &Q, &R); 1042 if (R != Zero) 1043 return cannotDivide(Numerator); 1044 Quotient = Q; 1045 } 1046 1047 private: 1048 SCEVDivision(ScalarEvolution &S, const SCEV *Numerator, 1049 const SCEV *Denominator) 1050 : SE(S), Denominator(Denominator) { 1051 Zero = SE.getZero(Denominator->getType()); 1052 One = SE.getOne(Denominator->getType()); 1053 1054 // We generally do not know how to divide Expr by Denominator. We 1055 // initialize the division to a "cannot divide" state to simplify the rest 1056 // of the code. 1057 cannotDivide(Numerator); 1058 } 1059 1060 // Convenience function for giving up on the division. We set the quotient to 1061 // be equal to zero and the remainder to be equal to the numerator. 1062 void cannotDivide(const SCEV *Numerator) { 1063 Quotient = Zero; 1064 Remainder = Numerator; 1065 } 1066 1067 ScalarEvolution &SE; 1068 const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One; 1069 }; 1070 1071 } // end anonymous namespace 1072 1073 //===----------------------------------------------------------------------===// 1074 // Simple SCEV method implementations 1075 //===----------------------------------------------------------------------===// 1076 1077 /// Compute BC(It, K). The result has width W. Assume, K > 0. 1078 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K, 1079 ScalarEvolution &SE, 1080 Type *ResultTy) { 1081 // Handle the simplest case efficiently. 1082 if (K == 1) 1083 return SE.getTruncateOrZeroExtend(It, ResultTy); 1084 1085 // We are using the following formula for BC(It, K): 1086 // 1087 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K! 1088 // 1089 // Suppose, W is the bitwidth of the return value. We must be prepared for 1090 // overflow. Hence, we must assure that the result of our computation is 1091 // equal to the accurate one modulo 2^W. Unfortunately, division isn't 1092 // safe in modular arithmetic. 1093 // 1094 // However, this code doesn't use exactly that formula; the formula it uses 1095 // is something like the following, where T is the number of factors of 2 in 1096 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is 1097 // exponentiation: 1098 // 1099 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T) 1100 // 1101 // This formula is trivially equivalent to the previous formula. However, 1102 // this formula can be implemented much more efficiently. The trick is that 1103 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular 1104 // arithmetic. To do exact division in modular arithmetic, all we have 1105 // to do is multiply by the inverse. Therefore, this step can be done at 1106 // width W. 1107 // 1108 // The next issue is how to safely do the division by 2^T. The way this 1109 // is done is by doing the multiplication step at a width of at least W + T 1110 // bits. This way, the bottom W+T bits of the product are accurate. Then, 1111 // when we perform the division by 2^T (which is equivalent to a right shift 1112 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get 1113 // truncated out after the division by 2^T. 1114 // 1115 // In comparison to just directly using the first formula, this technique 1116 // is much more efficient; using the first formula requires W * K bits, 1117 // but this formula less than W + K bits. Also, the first formula requires 1118 // a division step, whereas this formula only requires multiplies and shifts. 1119 // 1120 // It doesn't matter whether the subtraction step is done in the calculation 1121 // width or the input iteration count's width; if the subtraction overflows, 1122 // the result must be zero anyway. We prefer here to do it in the width of 1123 // the induction variable because it helps a lot for certain cases; CodeGen 1124 // isn't smart enough to ignore the overflow, which leads to much less 1125 // efficient code if the width of the subtraction is wider than the native 1126 // register width. 1127 // 1128 // (It's possible to not widen at all by pulling out factors of 2 before 1129 // the multiplication; for example, K=2 can be calculated as 1130 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires 1131 // extra arithmetic, so it's not an obvious win, and it gets 1132 // much more complicated for K > 3.) 1133 1134 // Protection from insane SCEVs; this bound is conservative, 1135 // but it probably doesn't matter. 1136 if (K > 1000) 1137 return SE.getCouldNotCompute(); 1138 1139 unsigned W = SE.getTypeSizeInBits(ResultTy); 1140 1141 // Calculate K! / 2^T and T; we divide out the factors of two before 1142 // multiplying for calculating K! / 2^T to avoid overflow. 1143 // Other overflow doesn't matter because we only care about the bottom 1144 // W bits of the result. 1145 APInt OddFactorial(W, 1); 1146 unsigned T = 1; 1147 for (unsigned i = 3; i <= K; ++i) { 1148 APInt Mult(W, i); 1149 unsigned TwoFactors = Mult.countTrailingZeros(); 1150 T += TwoFactors; 1151 Mult.lshrInPlace(TwoFactors); 1152 OddFactorial *= Mult; 1153 } 1154 1155 // We need at least W + T bits for the multiplication step 1156 unsigned CalculationBits = W + T; 1157 1158 // Calculate 2^T, at width T+W. 1159 APInt DivFactor = APInt::getOneBitSet(CalculationBits, T); 1160 1161 // Calculate the multiplicative inverse of K! / 2^T; 1162 // this multiplication factor will perform the exact division by 1163 // K! / 2^T. 1164 APInt Mod = APInt::getSignedMinValue(W+1); 1165 APInt MultiplyFactor = OddFactorial.zext(W+1); 1166 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); 1167 MultiplyFactor = MultiplyFactor.trunc(W); 1168 1169 // Calculate the product, at width T+W 1170 IntegerType *CalculationTy = IntegerType::get(SE.getContext(), 1171 CalculationBits); 1172 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); 1173 for (unsigned i = 1; i != K; ++i) { 1174 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); 1175 Dividend = SE.getMulExpr(Dividend, 1176 SE.getTruncateOrZeroExtend(S, CalculationTy)); 1177 } 1178 1179 // Divide by 2^T 1180 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); 1181 1182 // Truncate the result, and divide by K! / 2^T. 1183 1184 return SE.getMulExpr(SE.getConstant(MultiplyFactor), 1185 SE.getTruncateOrZeroExtend(DivResult, ResultTy)); 1186 } 1187 1188 /// Return the value of this chain of recurrences at the specified iteration 1189 /// number. We can evaluate this recurrence by multiplying each element in the 1190 /// chain by the binomial coefficient corresponding to it. In other words, we 1191 /// can evaluate {A,+,B,+,C,+,D} as: 1192 /// 1193 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) 1194 /// 1195 /// where BC(It, k) stands for binomial coefficient. 1196 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, 1197 ScalarEvolution &SE) const { 1198 const SCEV *Result = getStart(); 1199 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 1200 // The computation is correct in the face of overflow provided that the 1201 // multiplication is performed _after_ the evaluation of the binomial 1202 // coefficient. 1203 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); 1204 if (isa<SCEVCouldNotCompute>(Coeff)) 1205 return Coeff; 1206 1207 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); 1208 } 1209 return Result; 1210 } 1211 1212 //===----------------------------------------------------------------------===// 1213 // SCEV Expression folder implementations 1214 //===----------------------------------------------------------------------===// 1215 1216 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, 1217 Type *Ty) { 1218 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && 1219 "This is not a truncating conversion!"); 1220 assert(isSCEVable(Ty) && 1221 "This is not a conversion to a SCEVable type!"); 1222 Ty = getEffectiveSCEVType(Ty); 1223 1224 FoldingSetNodeID ID; 1225 ID.AddInteger(scTruncate); 1226 ID.AddPointer(Op); 1227 ID.AddPointer(Ty); 1228 void *IP = nullptr; 1229 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1230 1231 // Fold if the operand is constant. 1232 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1233 return getConstant( 1234 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); 1235 1236 // trunc(trunc(x)) --> trunc(x) 1237 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) 1238 return getTruncateExpr(ST->getOperand(), Ty); 1239 1240 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing 1241 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1242 return getTruncateOrSignExtend(SS->getOperand(), Ty); 1243 1244 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing 1245 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1246 return getTruncateOrZeroExtend(SZ->getOperand(), Ty); 1247 1248 // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and 1249 // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN), 1250 // if after transforming we have at most one truncate, not counting truncates 1251 // that replace other casts. 1252 if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) { 1253 auto *CommOp = cast<SCEVCommutativeExpr>(Op); 1254 SmallVector<const SCEV *, 4> Operands; 1255 unsigned numTruncs = 0; 1256 for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2; 1257 ++i) { 1258 const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty); 1259 if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S)) 1260 numTruncs++; 1261 Operands.push_back(S); 1262 } 1263 if (numTruncs < 2) { 1264 if (isa<SCEVAddExpr>(Op)) 1265 return getAddExpr(Operands); 1266 else if (isa<SCEVMulExpr>(Op)) 1267 return getMulExpr(Operands); 1268 else 1269 llvm_unreachable("Unexpected SCEV type for Op."); 1270 } 1271 // Although we checked in the beginning that ID is not in the cache, it is 1272 // possible that during recursion and different modification ID was inserted 1273 // into the cache. So if we find it, just return it. 1274 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 1275 return S; 1276 } 1277 1278 // If the input value is a chrec scev, truncate the chrec's operands. 1279 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 1280 SmallVector<const SCEV *, 4> Operands; 1281 for (const SCEV *Op : AddRec->operands()) 1282 Operands.push_back(getTruncateExpr(Op, Ty)); 1283 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap); 1284 } 1285 1286 // The cast wasn't folded; create an explicit cast node. We can reuse 1287 // the existing insert position since if we get here, we won't have 1288 // made any changes which would invalidate it. 1289 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), 1290 Op, Ty); 1291 UniqueSCEVs.InsertNode(S, IP); 1292 addToLoopUseLists(S); 1293 return S; 1294 } 1295 1296 // Get the limit of a recurrence such that incrementing by Step cannot cause 1297 // signed overflow as long as the value of the recurrence within the 1298 // loop does not exceed this limit before incrementing. 1299 static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step, 1300 ICmpInst::Predicate *Pred, 1301 ScalarEvolution *SE) { 1302 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); 1303 if (SE->isKnownPositive(Step)) { 1304 *Pred = ICmpInst::ICMP_SLT; 1305 return SE->getConstant(APInt::getSignedMinValue(BitWidth) - 1306 SE->getSignedRangeMax(Step)); 1307 } 1308 if (SE->isKnownNegative(Step)) { 1309 *Pred = ICmpInst::ICMP_SGT; 1310 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) - 1311 SE->getSignedRangeMin(Step)); 1312 } 1313 return nullptr; 1314 } 1315 1316 // Get the limit of a recurrence such that incrementing by Step cannot cause 1317 // unsigned overflow as long as the value of the recurrence within the loop does 1318 // not exceed this limit before incrementing. 1319 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, 1320 ICmpInst::Predicate *Pred, 1321 ScalarEvolution *SE) { 1322 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); 1323 *Pred = ICmpInst::ICMP_ULT; 1324 1325 return SE->getConstant(APInt::getMinValue(BitWidth) - 1326 SE->getUnsignedRangeMax(Step)); 1327 } 1328 1329 namespace { 1330 1331 struct ExtendOpTraitsBase { 1332 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, 1333 unsigned); 1334 }; 1335 1336 // Used to make code generic over signed and unsigned overflow. 1337 template <typename ExtendOp> struct ExtendOpTraits { 1338 // Members present: 1339 // 1340 // static const SCEV::NoWrapFlags WrapType; 1341 // 1342 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; 1343 // 1344 // static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1345 // ICmpInst::Predicate *Pred, 1346 // ScalarEvolution *SE); 1347 }; 1348 1349 template <> 1350 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { 1351 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; 1352 1353 static const GetExtendExprTy GetExtendExpr; 1354 1355 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1356 ICmpInst::Predicate *Pred, 1357 ScalarEvolution *SE) { 1358 return getSignedOverflowLimitForStep(Step, Pred, SE); 1359 } 1360 }; 1361 1362 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1363 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; 1364 1365 template <> 1366 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { 1367 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; 1368 1369 static const GetExtendExprTy GetExtendExpr; 1370 1371 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1372 ICmpInst::Predicate *Pred, 1373 ScalarEvolution *SE) { 1374 return getUnsignedOverflowLimitForStep(Step, Pred, SE); 1375 } 1376 }; 1377 1378 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1379 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; 1380 1381 } // end anonymous namespace 1382 1383 // The recurrence AR has been shown to have no signed/unsigned wrap or something 1384 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as 1385 // easily prove NSW/NUW for its preincrement or postincrement sibling. This 1386 // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + 1387 // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the 1388 // expression "Step + sext/zext(PreIncAR)" is congruent with 1389 // "sext/zext(PostIncAR)" 1390 template <typename ExtendOpTy> 1391 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, 1392 ScalarEvolution *SE, unsigned Depth) { 1393 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1394 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1395 1396 const Loop *L = AR->getLoop(); 1397 const SCEV *Start = AR->getStart(); 1398 const SCEV *Step = AR->getStepRecurrence(*SE); 1399 1400 // Check for a simple looking step prior to loop entry. 1401 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); 1402 if (!SA) 1403 return nullptr; 1404 1405 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV 1406 // subtraction is expensive. For this purpose, perform a quick and dirty 1407 // difference, by checking for Step in the operand list. 1408 SmallVector<const SCEV *, 4> DiffOps; 1409 for (const SCEV *Op : SA->operands()) 1410 if (Op != Step) 1411 DiffOps.push_back(Op); 1412 1413 if (DiffOps.size() == SA->getNumOperands()) 1414 return nullptr; 1415 1416 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + 1417 // `Step`: 1418 1419 // 1. NSW/NUW flags on the step increment. 1420 auto PreStartFlags = 1421 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); 1422 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); 1423 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( 1424 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); 1425 1426 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies 1427 // "S+X does not sign/unsign-overflow". 1428 // 1429 1430 const SCEV *BECount = SE->getBackedgeTakenCount(L); 1431 if (PreAR && PreAR->getNoWrapFlags(WrapType) && 1432 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) 1433 return PreStart; 1434 1435 // 2. Direct overflow check on the step operation's expression. 1436 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); 1437 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); 1438 const SCEV *OperandExtendedStart = 1439 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), 1440 (SE->*GetExtendExpr)(Step, WideTy, Depth)); 1441 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { 1442 if (PreAR && AR->getNoWrapFlags(WrapType)) { 1443 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW 1444 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then 1445 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. 1446 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType); 1447 } 1448 return PreStart; 1449 } 1450 1451 // 3. Loop precondition. 1452 ICmpInst::Predicate Pred; 1453 const SCEV *OverflowLimit = 1454 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); 1455 1456 if (OverflowLimit && 1457 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) 1458 return PreStart; 1459 1460 return nullptr; 1461 } 1462 1463 // Get the normalized zero or sign extended expression for this AddRec's Start. 1464 template <typename ExtendOpTy> 1465 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, 1466 ScalarEvolution *SE, 1467 unsigned Depth) { 1468 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1469 1470 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth); 1471 if (!PreStart) 1472 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); 1473 1474 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, 1475 Depth), 1476 (SE->*GetExtendExpr)(PreStart, Ty, Depth)); 1477 } 1478 1479 // Try to prove away overflow by looking at "nearby" add recurrences. A 1480 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it 1481 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. 1482 // 1483 // Formally: 1484 // 1485 // {S,+,X} == {S-T,+,X} + T 1486 // => Ext({S,+,X}) == Ext({S-T,+,X} + T) 1487 // 1488 // If ({S-T,+,X} + T) does not overflow ... (1) 1489 // 1490 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) 1491 // 1492 // If {S-T,+,X} does not overflow ... (2) 1493 // 1494 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) 1495 // == {Ext(S-T)+Ext(T),+,Ext(X)} 1496 // 1497 // If (S-T)+T does not overflow ... (3) 1498 // 1499 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} 1500 // == {Ext(S),+,Ext(X)} == LHS 1501 // 1502 // Thus, if (1), (2) and (3) are true for some T, then 1503 // Ext({S,+,X}) == {Ext(S),+,Ext(X)} 1504 // 1505 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) 1506 // does not overflow" restricted to the 0th iteration. Therefore we only need 1507 // to check for (1) and (2). 1508 // 1509 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T 1510 // is `Delta` (defined below). 1511 template <typename ExtendOpTy> 1512 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, 1513 const SCEV *Step, 1514 const Loop *L) { 1515 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1516 1517 // We restrict `Start` to a constant to prevent SCEV from spending too much 1518 // time here. It is correct (but more expensive) to continue with a 1519 // non-constant `Start` and do a general SCEV subtraction to compute 1520 // `PreStart` below. 1521 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); 1522 if (!StartC) 1523 return false; 1524 1525 APInt StartAI = StartC->getAPInt(); 1526 1527 for (unsigned Delta : {-2, -1, 1, 2}) { 1528 const SCEV *PreStart = getConstant(StartAI - Delta); 1529 1530 FoldingSetNodeID ID; 1531 ID.AddInteger(scAddRecExpr); 1532 ID.AddPointer(PreStart); 1533 ID.AddPointer(Step); 1534 ID.AddPointer(L); 1535 void *IP = nullptr; 1536 const auto *PreAR = 1537 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1538 1539 // Give up if we don't already have the add recurrence we need because 1540 // actually constructing an add recurrence is relatively expensive. 1541 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1542 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1543 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1544 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1545 DeltaS, &Pred, this); 1546 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1547 return true; 1548 } 1549 } 1550 1551 return false; 1552 } 1553 1554 // Finds an integer D for an expression (C + x + y + ...) such that the top 1555 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1556 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1557 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1558 // the (C + x + y + ...) expression is \p WholeAddExpr. 1559 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1560 const SCEVConstant *ConstantTerm, 1561 const SCEVAddExpr *WholeAddExpr) { 1562 const APInt C = ConstantTerm->getAPInt(); 1563 const unsigned BitWidth = C.getBitWidth(); 1564 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1565 uint32_t TZ = BitWidth; 1566 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1567 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1568 if (TZ) { 1569 // Set D to be as many least significant bits of C as possible while still 1570 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1571 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; 1572 } 1573 return APInt(BitWidth, 0); 1574 } 1575 1576 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1577 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1578 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1579 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1580 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1581 const APInt &ConstantStart, 1582 const SCEV *Step) { 1583 const unsigned BitWidth = ConstantStart.getBitWidth(); 1584 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1585 if (TZ) 1586 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1587 : ConstantStart; 1588 return APInt(BitWidth, 0); 1589 } 1590 1591 const SCEV * 1592 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1593 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1594 "This is not an extending conversion!"); 1595 assert(isSCEVable(Ty) && 1596 "This is not a conversion to a SCEVable type!"); 1597 Ty = getEffectiveSCEVType(Ty); 1598 1599 // Fold if the operand is constant. 1600 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1601 return getConstant( 1602 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1603 1604 // zext(zext(x)) --> zext(x) 1605 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1606 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1607 1608 // Before doing any expensive analysis, check to see if we've already 1609 // computed a SCEV for this Op and Ty. 1610 FoldingSetNodeID ID; 1611 ID.AddInteger(scZeroExtend); 1612 ID.AddPointer(Op); 1613 ID.AddPointer(Ty); 1614 void *IP = nullptr; 1615 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1616 if (Depth > MaxExtDepth) { 1617 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1618 Op, Ty); 1619 UniqueSCEVs.InsertNode(S, IP); 1620 addToLoopUseLists(S); 1621 return S; 1622 } 1623 1624 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1625 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1626 // It's possible the bits taken off by the truncate were all zero bits. If 1627 // so, we should be able to simplify this further. 1628 const SCEV *X = ST->getOperand(); 1629 ConstantRange CR = getUnsignedRange(X); 1630 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1631 unsigned NewBits = getTypeSizeInBits(Ty); 1632 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1633 CR.zextOrTrunc(NewBits))) 1634 return getTruncateOrZeroExtend(X, Ty); 1635 } 1636 1637 // If the input value is a chrec scev, and we can prove that the value 1638 // did not overflow the old, smaller, value, we can zero extend all of the 1639 // operands (often constants). This allows analysis of something like 1640 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1641 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1642 if (AR->isAffine()) { 1643 const SCEV *Start = AR->getStart(); 1644 const SCEV *Step = AR->getStepRecurrence(*this); 1645 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1646 const Loop *L = AR->getLoop(); 1647 1648 if (!AR->hasNoUnsignedWrap()) { 1649 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1650 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1651 } 1652 1653 // If we have special knowledge that this addrec won't overflow, 1654 // we don't need to do any further analysis. 1655 if (AR->hasNoUnsignedWrap()) 1656 return getAddRecExpr( 1657 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1658 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1659 1660 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1661 // Note that this serves two purposes: It filters out loops that are 1662 // simply not analyzable, and it covers the case where this code is 1663 // being called from within backedge-taken count analysis, such that 1664 // attempting to ask for the backedge-taken count would likely result 1665 // in infinite recursion. In the later case, the analysis code will 1666 // cope with a conservative value, and it will take care to purge 1667 // that value once it has finished. 1668 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1669 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1670 // Manually compute the final value for AR, checking for 1671 // overflow. 1672 1673 // Check whether the backedge-taken count can be losslessly casted to 1674 // the addrec's type. The count is always unsigned. 1675 const SCEV *CastedMaxBECount = 1676 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1677 const SCEV *RecastedMaxBECount = 1678 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1679 if (MaxBECount == RecastedMaxBECount) { 1680 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1681 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1682 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1683 SCEV::FlagAnyWrap, Depth + 1); 1684 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1685 SCEV::FlagAnyWrap, 1686 Depth + 1), 1687 WideTy, Depth + 1); 1688 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1689 const SCEV *WideMaxBECount = 1690 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1691 const SCEV *OperandExtendedAdd = 1692 getAddExpr(WideStart, 1693 getMulExpr(WideMaxBECount, 1694 getZeroExtendExpr(Step, WideTy, Depth + 1), 1695 SCEV::FlagAnyWrap, Depth + 1), 1696 SCEV::FlagAnyWrap, Depth + 1); 1697 if (ZAdd == OperandExtendedAdd) { 1698 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1699 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1700 // Return the expression with the addrec on the outside. 1701 return getAddRecExpr( 1702 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1703 Depth + 1), 1704 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1705 AR->getNoWrapFlags()); 1706 } 1707 // Similar to above, only this time treat the step value as signed. 1708 // This covers loops that count down. 1709 OperandExtendedAdd = 1710 getAddExpr(WideStart, 1711 getMulExpr(WideMaxBECount, 1712 getSignExtendExpr(Step, WideTy, Depth + 1), 1713 SCEV::FlagAnyWrap, Depth + 1), 1714 SCEV::FlagAnyWrap, Depth + 1); 1715 if (ZAdd == OperandExtendedAdd) { 1716 // Cache knowledge of AR NW, which is propagated to this AddRec. 1717 // Negative step causes unsigned wrap, but it still can't self-wrap. 1718 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1719 // Return the expression with the addrec on the outside. 1720 return getAddRecExpr( 1721 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1722 Depth + 1), 1723 getSignExtendExpr(Step, Ty, Depth + 1), L, 1724 AR->getNoWrapFlags()); 1725 } 1726 } 1727 } 1728 1729 // Normally, in the cases we can prove no-overflow via a 1730 // backedge guarding condition, we can also compute a backedge 1731 // taken count for the loop. The exceptions are assumptions and 1732 // guards present in the loop -- SCEV is not great at exploiting 1733 // these to compute max backedge taken counts, but can still use 1734 // these to prove lack of overflow. Use this fact to avoid 1735 // doing extra work that may not pay off. 1736 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1737 !AC.assumptions().empty()) { 1738 // If the backedge is guarded by a comparison with the pre-inc 1739 // value the addrec is safe. Also, if the entry is guarded by 1740 // a comparison with the start value and the backedge is 1741 // guarded by a comparison with the post-inc value, the addrec 1742 // is safe. 1743 if (isKnownPositive(Step)) { 1744 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1745 getUnsignedRangeMax(Step)); 1746 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1747 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1748 // Cache knowledge of AR NUW, which is propagated to this 1749 // AddRec. 1750 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1751 // Return the expression with the addrec on the outside. 1752 return getAddRecExpr( 1753 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1754 Depth + 1), 1755 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1756 AR->getNoWrapFlags()); 1757 } 1758 } else if (isKnownNegative(Step)) { 1759 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1760 getSignedRangeMin(Step)); 1761 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1762 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1763 // Cache knowledge of AR NW, which is propagated to this 1764 // AddRec. Negative step causes unsigned wrap, but it 1765 // still can't self-wrap. 1766 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1767 // Return the expression with the addrec on the outside. 1768 return getAddRecExpr( 1769 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1770 Depth + 1), 1771 getSignExtendExpr(Step, Ty, Depth + 1), L, 1772 AR->getNoWrapFlags()); 1773 } 1774 } 1775 } 1776 1777 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1778 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1779 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1780 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1781 const APInt &C = SC->getAPInt(); 1782 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1783 if (D != 0) { 1784 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1785 const SCEV *SResidual = 1786 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1787 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1788 return getAddExpr(SZExtD, SZExtR, 1789 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1790 Depth + 1); 1791 } 1792 } 1793 1794 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1795 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1796 return getAddRecExpr( 1797 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1798 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1799 } 1800 } 1801 1802 // zext(A % B) --> zext(A) % zext(B) 1803 { 1804 const SCEV *LHS; 1805 const SCEV *RHS; 1806 if (matchURem(Op, LHS, RHS)) 1807 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1808 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1809 } 1810 1811 // zext(A / B) --> zext(A) / zext(B). 1812 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1813 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1814 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1815 1816 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1817 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1818 if (SA->hasNoUnsignedWrap()) { 1819 // If the addition does not unsign overflow then we can, by definition, 1820 // commute the zero extension with the addition operation. 1821 SmallVector<const SCEV *, 4> Ops; 1822 for (const auto *Op : SA->operands()) 1823 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1824 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1825 } 1826 1827 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1828 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1829 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1830 // 1831 // Often address arithmetics contain expressions like 1832 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1833 // This transformation is useful while proving that such expressions are 1834 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 1835 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1836 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1837 if (D != 0) { 1838 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1839 const SCEV *SResidual = 1840 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1841 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1842 return getAddExpr(SZExtD, SZExtR, 1843 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1844 Depth + 1); 1845 } 1846 } 1847 } 1848 1849 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1850 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1851 if (SM->hasNoUnsignedWrap()) { 1852 // If the multiply does not unsign overflow then we can, by definition, 1853 // commute the zero extension with the multiply operation. 1854 SmallVector<const SCEV *, 4> Ops; 1855 for (const auto *Op : SM->operands()) 1856 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1857 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1858 } 1859 1860 // zext(2^K * (trunc X to iN)) to iM -> 1861 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1862 // 1863 // Proof: 1864 // 1865 // zext(2^K * (trunc X to iN)) to iM 1866 // = zext((trunc X to iN) << K) to iM 1867 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1868 // (because shl removes the top K bits) 1869 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1870 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1871 // 1872 if (SM->getNumOperands() == 2) 1873 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1874 if (MulLHS->getAPInt().isPowerOf2()) 1875 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1876 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1877 MulLHS->getAPInt().logBase2(); 1878 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1879 return getMulExpr( 1880 getZeroExtendExpr(MulLHS, Ty), 1881 getZeroExtendExpr( 1882 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1883 SCEV::FlagNUW, Depth + 1); 1884 } 1885 } 1886 1887 // The cast wasn't folded; create an explicit cast node. 1888 // Recompute the insert position, as it may have been invalidated. 1889 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1890 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1891 Op, Ty); 1892 UniqueSCEVs.InsertNode(S, IP); 1893 addToLoopUseLists(S); 1894 return S; 1895 } 1896 1897 const SCEV * 1898 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1899 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1900 "This is not an extending conversion!"); 1901 assert(isSCEVable(Ty) && 1902 "This is not a conversion to a SCEVable type!"); 1903 Ty = getEffectiveSCEVType(Ty); 1904 1905 // Fold if the operand is constant. 1906 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1907 return getConstant( 1908 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1909 1910 // sext(sext(x)) --> sext(x) 1911 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1912 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1913 1914 // sext(zext(x)) --> zext(x) 1915 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1916 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1917 1918 // Before doing any expensive analysis, check to see if we've already 1919 // computed a SCEV for this Op and Ty. 1920 FoldingSetNodeID ID; 1921 ID.AddInteger(scSignExtend); 1922 ID.AddPointer(Op); 1923 ID.AddPointer(Ty); 1924 void *IP = nullptr; 1925 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1926 // Limit recursion depth. 1927 if (Depth > MaxExtDepth) { 1928 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1929 Op, Ty); 1930 UniqueSCEVs.InsertNode(S, IP); 1931 addToLoopUseLists(S); 1932 return S; 1933 } 1934 1935 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1936 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1937 // It's possible the bits taken off by the truncate were all sign bits. If 1938 // so, we should be able to simplify this further. 1939 const SCEV *X = ST->getOperand(); 1940 ConstantRange CR = getSignedRange(X); 1941 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1942 unsigned NewBits = getTypeSizeInBits(Ty); 1943 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1944 CR.sextOrTrunc(NewBits))) 1945 return getTruncateOrSignExtend(X, Ty); 1946 } 1947 1948 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1949 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1950 if (SA->hasNoSignedWrap()) { 1951 // If the addition does not sign overflow then we can, by definition, 1952 // commute the sign extension with the addition operation. 1953 SmallVector<const SCEV *, 4> Ops; 1954 for (const auto *Op : SA->operands()) 1955 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1956 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1957 } 1958 1959 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1960 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1961 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1962 // 1963 // For instance, this will bring two seemingly different expressions: 1964 // 1 + sext(5 + 20 * %x + 24 * %y) and 1965 // sext(6 + 20 * %x + 24 * %y) 1966 // to the same form: 1967 // 2 + sext(4 + 20 * %x + 24 * %y) 1968 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1969 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1970 if (D != 0) { 1971 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1972 const SCEV *SResidual = 1973 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1974 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1975 return getAddExpr(SSExtD, SSExtR, 1976 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1977 Depth + 1); 1978 } 1979 } 1980 } 1981 // If the input value is a chrec scev, and we can prove that the value 1982 // did not overflow the old, smaller, value, we can sign extend all of the 1983 // operands (often constants). This allows analysis of something like 1984 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1985 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1986 if (AR->isAffine()) { 1987 const SCEV *Start = AR->getStart(); 1988 const SCEV *Step = AR->getStepRecurrence(*this); 1989 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1990 const Loop *L = AR->getLoop(); 1991 1992 if (!AR->hasNoSignedWrap()) { 1993 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1994 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1995 } 1996 1997 // If we have special knowledge that this addrec won't overflow, 1998 // we don't need to do any further analysis. 1999 if (AR->hasNoSignedWrap()) 2000 return getAddRecExpr( 2001 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2002 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 2003 2004 // Check whether the backedge-taken count is SCEVCouldNotCompute. 2005 // Note that this serves two purposes: It filters out loops that are 2006 // simply not analyzable, and it covers the case where this code is 2007 // being called from within backedge-taken count analysis, such that 2008 // attempting to ask for the backedge-taken count would likely result 2009 // in infinite recursion. In the later case, the analysis code will 2010 // cope with a conservative value, and it will take care to purge 2011 // that value once it has finished. 2012 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 2013 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 2014 // Manually compute the final value for AR, checking for 2015 // overflow. 2016 2017 // Check whether the backedge-taken count can be losslessly casted to 2018 // the addrec's type. The count is always unsigned. 2019 const SCEV *CastedMaxBECount = 2020 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 2021 const SCEV *RecastedMaxBECount = 2022 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 2023 if (MaxBECount == RecastedMaxBECount) { 2024 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 2025 // Check whether Start+Step*MaxBECount has no signed overflow. 2026 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 2027 SCEV::FlagAnyWrap, Depth + 1); 2028 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 2029 SCEV::FlagAnyWrap, 2030 Depth + 1), 2031 WideTy, Depth + 1); 2032 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 2033 const SCEV *WideMaxBECount = 2034 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 2035 const SCEV *OperandExtendedAdd = 2036 getAddExpr(WideStart, 2037 getMulExpr(WideMaxBECount, 2038 getSignExtendExpr(Step, WideTy, Depth + 1), 2039 SCEV::FlagAnyWrap, Depth + 1), 2040 SCEV::FlagAnyWrap, Depth + 1); 2041 if (SAdd == OperandExtendedAdd) { 2042 // Cache knowledge of AR NSW, which is propagated to this AddRec. 2043 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2044 // Return the expression with the addrec on the outside. 2045 return getAddRecExpr( 2046 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2047 Depth + 1), 2048 getSignExtendExpr(Step, Ty, Depth + 1), L, 2049 AR->getNoWrapFlags()); 2050 } 2051 // Similar to above, only this time treat the step value as unsigned. 2052 // This covers loops that count up with an unsigned step. 2053 OperandExtendedAdd = 2054 getAddExpr(WideStart, 2055 getMulExpr(WideMaxBECount, 2056 getZeroExtendExpr(Step, WideTy, Depth + 1), 2057 SCEV::FlagAnyWrap, Depth + 1), 2058 SCEV::FlagAnyWrap, Depth + 1); 2059 if (SAdd == OperandExtendedAdd) { 2060 // If AR wraps around then 2061 // 2062 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 2063 // => SAdd != OperandExtendedAdd 2064 // 2065 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 2066 // (SAdd == OperandExtendedAdd => AR is NW) 2067 2068 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 2069 2070 // Return the expression with the addrec on the outside. 2071 return getAddRecExpr( 2072 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2073 Depth + 1), 2074 getZeroExtendExpr(Step, Ty, Depth + 1), L, 2075 AR->getNoWrapFlags()); 2076 } 2077 } 2078 } 2079 2080 // Normally, in the cases we can prove no-overflow via a 2081 // backedge guarding condition, we can also compute a backedge 2082 // taken count for the loop. The exceptions are assumptions and 2083 // guards present in the loop -- SCEV is not great at exploiting 2084 // these to compute max backedge taken counts, but can still use 2085 // these to prove lack of overflow. Use this fact to avoid 2086 // doing extra work that may not pay off. 2087 2088 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 2089 !AC.assumptions().empty()) { 2090 // If the backedge is guarded by a comparison with the pre-inc 2091 // value the addrec is safe. Also, if the entry is guarded by 2092 // a comparison with the start value and the backedge is 2093 // guarded by a comparison with the post-inc value, the addrec 2094 // is safe. 2095 ICmpInst::Predicate Pred; 2096 const SCEV *OverflowLimit = 2097 getSignedOverflowLimitForStep(Step, &Pred, this); 2098 if (OverflowLimit && 2099 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 2100 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 2101 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 2102 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2103 return getAddRecExpr( 2104 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2105 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2106 } 2107 } 2108 2109 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 2110 // if D + (C - D + Step * n) could be proven to not signed wrap 2111 // where D maximizes the number of trailing zeros of (C - D + Step * n) 2112 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 2113 const APInt &C = SC->getAPInt(); 2114 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 2115 if (D != 0) { 2116 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2117 const SCEV *SResidual = 2118 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 2119 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2120 return getAddExpr(SSExtD, SSExtR, 2121 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2122 Depth + 1); 2123 } 2124 } 2125 2126 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2127 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2128 return getAddRecExpr( 2129 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2130 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2131 } 2132 } 2133 2134 // If the input value is provably positive and we could not simplify 2135 // away the sext build a zext instead. 2136 if (isKnownNonNegative(Op)) 2137 return getZeroExtendExpr(Op, Ty, Depth + 1); 2138 2139 // The cast wasn't folded; create an explicit cast node. 2140 // Recompute the insert position, as it may have been invalidated. 2141 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2142 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2143 Op, Ty); 2144 UniqueSCEVs.InsertNode(S, IP); 2145 addToLoopUseLists(S); 2146 return S; 2147 } 2148 2149 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2150 /// unspecified bits out to the given type. 2151 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2152 Type *Ty) { 2153 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2154 "This is not an extending conversion!"); 2155 assert(isSCEVable(Ty) && 2156 "This is not a conversion to a SCEVable type!"); 2157 Ty = getEffectiveSCEVType(Ty); 2158 2159 // Sign-extend negative constants. 2160 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2161 if (SC->getAPInt().isNegative()) 2162 return getSignExtendExpr(Op, Ty); 2163 2164 // Peel off a truncate cast. 2165 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2166 const SCEV *NewOp = T->getOperand(); 2167 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2168 return getAnyExtendExpr(NewOp, Ty); 2169 return getTruncateOrNoop(NewOp, Ty); 2170 } 2171 2172 // Next try a zext cast. If the cast is folded, use it. 2173 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2174 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2175 return ZExt; 2176 2177 // Next try a sext cast. If the cast is folded, use it. 2178 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2179 if (!isa<SCEVSignExtendExpr>(SExt)) 2180 return SExt; 2181 2182 // Force the cast to be folded into the operands of an addrec. 2183 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2184 SmallVector<const SCEV *, 4> Ops; 2185 for (const SCEV *Op : AR->operands()) 2186 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2187 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2188 } 2189 2190 // If the expression is obviously signed, use the sext cast value. 2191 if (isa<SCEVSMaxExpr>(Op)) 2192 return SExt; 2193 2194 // Absent any other information, use the zext cast value. 2195 return ZExt; 2196 } 2197 2198 /// Process the given Ops list, which is a list of operands to be added under 2199 /// the given scale, update the given map. This is a helper function for 2200 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2201 /// that would form an add expression like this: 2202 /// 2203 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2204 /// 2205 /// where A and B are constants, update the map with these values: 2206 /// 2207 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2208 /// 2209 /// and add 13 + A*B*29 to AccumulatedConstant. 2210 /// This will allow getAddRecExpr to produce this: 2211 /// 2212 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2213 /// 2214 /// This form often exposes folding opportunities that are hidden in 2215 /// the original operand list. 2216 /// 2217 /// Return true iff it appears that any interesting folding opportunities 2218 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2219 /// the common case where no interesting opportunities are present, and 2220 /// is also used as a check to avoid infinite recursion. 2221 static bool 2222 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2223 SmallVectorImpl<const SCEV *> &NewOps, 2224 APInt &AccumulatedConstant, 2225 const SCEV *const *Ops, size_t NumOperands, 2226 const APInt &Scale, 2227 ScalarEvolution &SE) { 2228 bool Interesting = false; 2229 2230 // Iterate over the add operands. They are sorted, with constants first. 2231 unsigned i = 0; 2232 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2233 ++i; 2234 // Pull a buried constant out to the outside. 2235 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2236 Interesting = true; 2237 AccumulatedConstant += Scale * C->getAPInt(); 2238 } 2239 2240 // Next comes everything else. We're especially interested in multiplies 2241 // here, but they're in the middle, so just visit the rest with one loop. 2242 for (; i != NumOperands; ++i) { 2243 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2244 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2245 APInt NewScale = 2246 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2247 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2248 // A multiplication of a constant with another add; recurse. 2249 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2250 Interesting |= 2251 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2252 Add->op_begin(), Add->getNumOperands(), 2253 NewScale, SE); 2254 } else { 2255 // A multiplication of a constant with some other value. Update 2256 // the map. 2257 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2258 const SCEV *Key = SE.getMulExpr(MulOps); 2259 auto Pair = M.insert({Key, NewScale}); 2260 if (Pair.second) { 2261 NewOps.push_back(Pair.first->first); 2262 } else { 2263 Pair.first->second += NewScale; 2264 // The map already had an entry for this value, which may indicate 2265 // a folding opportunity. 2266 Interesting = true; 2267 } 2268 } 2269 } else { 2270 // An ordinary operand. Update the map. 2271 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2272 M.insert({Ops[i], Scale}); 2273 if (Pair.second) { 2274 NewOps.push_back(Pair.first->first); 2275 } else { 2276 Pair.first->second += Scale; 2277 // The map already had an entry for this value, which may indicate 2278 // a folding opportunity. 2279 Interesting = true; 2280 } 2281 } 2282 } 2283 2284 return Interesting; 2285 } 2286 2287 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2288 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2289 // can't-overflow flags for the operation if possible. 2290 static SCEV::NoWrapFlags 2291 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2292 const SmallVectorImpl<const SCEV *> &Ops, 2293 SCEV::NoWrapFlags Flags) { 2294 using namespace std::placeholders; 2295 2296 using OBO = OverflowingBinaryOperator; 2297 2298 bool CanAnalyze = 2299 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2300 (void)CanAnalyze; 2301 assert(CanAnalyze && "don't call from other places!"); 2302 2303 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2304 SCEV::NoWrapFlags SignOrUnsignWrap = 2305 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2306 2307 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2308 auto IsKnownNonNegative = [&](const SCEV *S) { 2309 return SE->isKnownNonNegative(S); 2310 }; 2311 2312 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2313 Flags = 2314 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2315 2316 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2317 2318 if (SignOrUnsignWrap != SignOrUnsignMask && 2319 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2320 isa<SCEVConstant>(Ops[0])) { 2321 2322 auto Opcode = [&] { 2323 switch (Type) { 2324 case scAddExpr: 2325 return Instruction::Add; 2326 case scMulExpr: 2327 return Instruction::Mul; 2328 default: 2329 llvm_unreachable("Unexpected SCEV op."); 2330 } 2331 }(); 2332 2333 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2334 2335 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2336 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2337 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2338 Opcode, C, OBO::NoSignedWrap); 2339 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2340 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2341 } 2342 2343 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2344 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2345 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2346 Opcode, C, OBO::NoUnsignedWrap); 2347 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2348 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2349 } 2350 } 2351 2352 return Flags; 2353 } 2354 2355 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2356 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2357 } 2358 2359 /// Get a canonical add expression, or something simpler if possible. 2360 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2361 SCEV::NoWrapFlags Flags, 2362 unsigned Depth) { 2363 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2364 "only nuw or nsw allowed"); 2365 assert(!Ops.empty() && "Cannot get empty add!"); 2366 if (Ops.size() == 1) return Ops[0]; 2367 #ifndef NDEBUG 2368 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2369 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2370 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2371 "SCEVAddExpr operand types don't match!"); 2372 #endif 2373 2374 // Sort by complexity, this groups all similar expression types together. 2375 GroupByComplexity(Ops, &LI, DT); 2376 2377 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2378 2379 // If there are any constants, fold them together. 2380 unsigned Idx = 0; 2381 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2382 ++Idx; 2383 assert(Idx < Ops.size()); 2384 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2385 // We found two constants, fold them together! 2386 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2387 if (Ops.size() == 2) return Ops[0]; 2388 Ops.erase(Ops.begin()+1); // Erase the folded element 2389 LHSC = cast<SCEVConstant>(Ops[0]); 2390 } 2391 2392 // If we are left with a constant zero being added, strip it off. 2393 if (LHSC->getValue()->isZero()) { 2394 Ops.erase(Ops.begin()); 2395 --Idx; 2396 } 2397 2398 if (Ops.size() == 1) return Ops[0]; 2399 } 2400 2401 // Limit recursion calls depth. 2402 if (Depth > MaxArithDepth) 2403 return getOrCreateAddExpr(Ops, Flags); 2404 2405 // Okay, check to see if the same value occurs in the operand list more than 2406 // once. If so, merge them together into an multiply expression. Since we 2407 // sorted the list, these values are required to be adjacent. 2408 Type *Ty = Ops[0]->getType(); 2409 bool FoundMatch = false; 2410 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2411 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2412 // Scan ahead to count how many equal operands there are. 2413 unsigned Count = 2; 2414 while (i+Count != e && Ops[i+Count] == Ops[i]) 2415 ++Count; 2416 // Merge the values into a multiply. 2417 const SCEV *Scale = getConstant(Ty, Count); 2418 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2419 if (Ops.size() == Count) 2420 return Mul; 2421 Ops[i] = Mul; 2422 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2423 --i; e -= Count - 1; 2424 FoundMatch = true; 2425 } 2426 if (FoundMatch) 2427 return getAddExpr(Ops, Flags, Depth + 1); 2428 2429 // Check for truncates. If all the operands are truncated from the same 2430 // type, see if factoring out the truncate would permit the result to be 2431 // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y) 2432 // if the contents of the resulting outer trunc fold to something simple. 2433 auto FindTruncSrcType = [&]() -> Type * { 2434 // We're ultimately looking to fold an addrec of truncs and muls of only 2435 // constants and truncs, so if we find any other types of SCEV 2436 // as operands of the addrec then we bail and return nullptr here. 2437 // Otherwise, we return the type of the operand of a trunc that we find. 2438 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2439 return T->getOperand()->getType(); 2440 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2441 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2442 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2443 return T->getOperand()->getType(); 2444 } 2445 return nullptr; 2446 }; 2447 if (auto *SrcType = FindTruncSrcType()) { 2448 SmallVector<const SCEV *, 8> LargeOps; 2449 bool Ok = true; 2450 // Check all the operands to see if they can be represented in the 2451 // source type of the truncate. 2452 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2453 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2454 if (T->getOperand()->getType() != SrcType) { 2455 Ok = false; 2456 break; 2457 } 2458 LargeOps.push_back(T->getOperand()); 2459 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2460 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2461 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2462 SmallVector<const SCEV *, 8> LargeMulOps; 2463 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2464 if (const SCEVTruncateExpr *T = 2465 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2466 if (T->getOperand()->getType() != SrcType) { 2467 Ok = false; 2468 break; 2469 } 2470 LargeMulOps.push_back(T->getOperand()); 2471 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2472 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2473 } else { 2474 Ok = false; 2475 break; 2476 } 2477 } 2478 if (Ok) 2479 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2480 } else { 2481 Ok = false; 2482 break; 2483 } 2484 } 2485 if (Ok) { 2486 // Evaluate the expression in the larger type. 2487 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2488 // If it folds to something simple, use it. Otherwise, don't. 2489 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2490 return getTruncateExpr(Fold, Ty); 2491 } 2492 } 2493 2494 // Skip past any other cast SCEVs. 2495 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2496 ++Idx; 2497 2498 // If there are add operands they would be next. 2499 if (Idx < Ops.size()) { 2500 bool DeletedAdd = false; 2501 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2502 if (Ops.size() > AddOpsInlineThreshold || 2503 Add->getNumOperands() > AddOpsInlineThreshold) 2504 break; 2505 // If we have an add, expand the add operands onto the end of the operands 2506 // list. 2507 Ops.erase(Ops.begin()+Idx); 2508 Ops.append(Add->op_begin(), Add->op_end()); 2509 DeletedAdd = true; 2510 } 2511 2512 // If we deleted at least one add, we added operands to the end of the list, 2513 // and they are not necessarily sorted. Recurse to resort and resimplify 2514 // any operands we just acquired. 2515 if (DeletedAdd) 2516 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2517 } 2518 2519 // Skip over the add expression until we get to a multiply. 2520 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2521 ++Idx; 2522 2523 // Check to see if there are any folding opportunities present with 2524 // operands multiplied by constant values. 2525 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2526 uint64_t BitWidth = getTypeSizeInBits(Ty); 2527 DenseMap<const SCEV *, APInt> M; 2528 SmallVector<const SCEV *, 8> NewOps; 2529 APInt AccumulatedConstant(BitWidth, 0); 2530 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2531 Ops.data(), Ops.size(), 2532 APInt(BitWidth, 1), *this)) { 2533 struct APIntCompare { 2534 bool operator()(const APInt &LHS, const APInt &RHS) const { 2535 return LHS.ult(RHS); 2536 } 2537 }; 2538 2539 // Some interesting folding opportunity is present, so its worthwhile to 2540 // re-generate the operands list. Group the operands by constant scale, 2541 // to avoid multiplying by the same constant scale multiple times. 2542 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2543 for (const SCEV *NewOp : NewOps) 2544 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2545 // Re-generate the operands list. 2546 Ops.clear(); 2547 if (AccumulatedConstant != 0) 2548 Ops.push_back(getConstant(AccumulatedConstant)); 2549 for (auto &MulOp : MulOpLists) 2550 if (MulOp.first != 0) 2551 Ops.push_back(getMulExpr( 2552 getConstant(MulOp.first), 2553 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2554 SCEV::FlagAnyWrap, Depth + 1)); 2555 if (Ops.empty()) 2556 return getZero(Ty); 2557 if (Ops.size() == 1) 2558 return Ops[0]; 2559 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2560 } 2561 } 2562 2563 // If we are adding something to a multiply expression, make sure the 2564 // something is not already an operand of the multiply. If so, merge it into 2565 // the multiply. 2566 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2567 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2568 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2569 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2570 if (isa<SCEVConstant>(MulOpSCEV)) 2571 continue; 2572 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2573 if (MulOpSCEV == Ops[AddOp]) { 2574 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2575 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2576 if (Mul->getNumOperands() != 2) { 2577 // If the multiply has more than two operands, we must get the 2578 // Y*Z term. 2579 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2580 Mul->op_begin()+MulOp); 2581 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2582 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2583 } 2584 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2585 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2586 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2587 SCEV::FlagAnyWrap, Depth + 1); 2588 if (Ops.size() == 2) return OuterMul; 2589 if (AddOp < Idx) { 2590 Ops.erase(Ops.begin()+AddOp); 2591 Ops.erase(Ops.begin()+Idx-1); 2592 } else { 2593 Ops.erase(Ops.begin()+Idx); 2594 Ops.erase(Ops.begin()+AddOp-1); 2595 } 2596 Ops.push_back(OuterMul); 2597 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2598 } 2599 2600 // Check this multiply against other multiplies being added together. 2601 for (unsigned OtherMulIdx = Idx+1; 2602 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2603 ++OtherMulIdx) { 2604 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2605 // If MulOp occurs in OtherMul, we can fold the two multiplies 2606 // together. 2607 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2608 OMulOp != e; ++OMulOp) 2609 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2610 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2611 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2612 if (Mul->getNumOperands() != 2) { 2613 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2614 Mul->op_begin()+MulOp); 2615 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2616 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2617 } 2618 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2619 if (OtherMul->getNumOperands() != 2) { 2620 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2621 OtherMul->op_begin()+OMulOp); 2622 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2623 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2624 } 2625 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2626 const SCEV *InnerMulSum = 2627 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2628 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2629 SCEV::FlagAnyWrap, Depth + 1); 2630 if (Ops.size() == 2) return OuterMul; 2631 Ops.erase(Ops.begin()+Idx); 2632 Ops.erase(Ops.begin()+OtherMulIdx-1); 2633 Ops.push_back(OuterMul); 2634 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2635 } 2636 } 2637 } 2638 } 2639 2640 // If there are any add recurrences in the operands list, see if any other 2641 // added values are loop invariant. If so, we can fold them into the 2642 // recurrence. 2643 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2644 ++Idx; 2645 2646 // Scan over all recurrences, trying to fold loop invariants into them. 2647 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2648 // Scan all of the other operands to this add and add them to the vector if 2649 // they are loop invariant w.r.t. the recurrence. 2650 SmallVector<const SCEV *, 8> LIOps; 2651 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2652 const Loop *AddRecLoop = AddRec->getLoop(); 2653 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2654 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2655 LIOps.push_back(Ops[i]); 2656 Ops.erase(Ops.begin()+i); 2657 --i; --e; 2658 } 2659 2660 // If we found some loop invariants, fold them into the recurrence. 2661 if (!LIOps.empty()) { 2662 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2663 LIOps.push_back(AddRec->getStart()); 2664 2665 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2666 AddRec->op_end()); 2667 // This follows from the fact that the no-wrap flags on the outer add 2668 // expression are applicable on the 0th iteration, when the add recurrence 2669 // will be equal to its start value. 2670 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2671 2672 // Build the new addrec. Propagate the NUW and NSW flags if both the 2673 // outer add and the inner addrec are guaranteed to have no overflow. 2674 // Always propagate NW. 2675 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2676 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2677 2678 // If all of the other operands were loop invariant, we are done. 2679 if (Ops.size() == 1) return NewRec; 2680 2681 // Otherwise, add the folded AddRec by the non-invariant parts. 2682 for (unsigned i = 0;; ++i) 2683 if (Ops[i] == AddRec) { 2684 Ops[i] = NewRec; 2685 break; 2686 } 2687 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2688 } 2689 2690 // Okay, if there weren't any loop invariants to be folded, check to see if 2691 // there are multiple AddRec's with the same loop induction variable being 2692 // added together. If so, we can fold them. 2693 for (unsigned OtherIdx = Idx+1; 2694 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2695 ++OtherIdx) { 2696 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2697 // so that the 1st found AddRecExpr is dominated by all others. 2698 assert(DT.dominates( 2699 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2700 AddRec->getLoop()->getHeader()) && 2701 "AddRecExprs are not sorted in reverse dominance order?"); 2702 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2703 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2704 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2705 AddRec->op_end()); 2706 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2707 ++OtherIdx) { 2708 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2709 if (OtherAddRec->getLoop() == AddRecLoop) { 2710 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2711 i != e; ++i) { 2712 if (i >= AddRecOps.size()) { 2713 AddRecOps.append(OtherAddRec->op_begin()+i, 2714 OtherAddRec->op_end()); 2715 break; 2716 } 2717 SmallVector<const SCEV *, 2> TwoOps = { 2718 AddRecOps[i], OtherAddRec->getOperand(i)}; 2719 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2720 } 2721 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2722 } 2723 } 2724 // Step size has changed, so we cannot guarantee no self-wraparound. 2725 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2726 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2727 } 2728 } 2729 2730 // Otherwise couldn't fold anything into this recurrence. Move onto the 2731 // next one. 2732 } 2733 2734 // Okay, it looks like we really DO need an add expr. Check to see if we 2735 // already have one, otherwise create a new one. 2736 return getOrCreateAddExpr(Ops, Flags); 2737 } 2738 2739 const SCEV * 2740 ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2741 SCEV::NoWrapFlags Flags) { 2742 FoldingSetNodeID ID; 2743 ID.AddInteger(scAddExpr); 2744 for (const SCEV *Op : Ops) 2745 ID.AddPointer(Op); 2746 void *IP = nullptr; 2747 SCEVAddExpr *S = 2748 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2749 if (!S) { 2750 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2751 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2752 S = new (SCEVAllocator) 2753 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2754 UniqueSCEVs.InsertNode(S, IP); 2755 addToLoopUseLists(S); 2756 } 2757 S->setNoWrapFlags(Flags); 2758 return S; 2759 } 2760 2761 const SCEV * 2762 ScalarEvolution::getOrCreateAddRecExpr(SmallVectorImpl<const SCEV *> &Ops, 2763 const Loop *L, SCEV::NoWrapFlags Flags) { 2764 FoldingSetNodeID ID; 2765 ID.AddInteger(scAddRecExpr); 2766 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2767 ID.AddPointer(Ops[i]); 2768 ID.AddPointer(L); 2769 void *IP = nullptr; 2770 SCEVAddRecExpr *S = 2771 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2772 if (!S) { 2773 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2774 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2775 S = new (SCEVAllocator) 2776 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2777 UniqueSCEVs.InsertNode(S, IP); 2778 addToLoopUseLists(S); 2779 } 2780 S->setNoWrapFlags(Flags); 2781 return S; 2782 } 2783 2784 const SCEV * 2785 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2786 SCEV::NoWrapFlags Flags) { 2787 FoldingSetNodeID ID; 2788 ID.AddInteger(scMulExpr); 2789 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2790 ID.AddPointer(Ops[i]); 2791 void *IP = nullptr; 2792 SCEVMulExpr *S = 2793 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2794 if (!S) { 2795 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2796 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2797 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2798 O, Ops.size()); 2799 UniqueSCEVs.InsertNode(S, IP); 2800 addToLoopUseLists(S); 2801 } 2802 S->setNoWrapFlags(Flags); 2803 return S; 2804 } 2805 2806 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2807 uint64_t k = i*j; 2808 if (j > 1 && k / j != i) Overflow = true; 2809 return k; 2810 } 2811 2812 /// Compute the result of "n choose k", the binomial coefficient. If an 2813 /// intermediate computation overflows, Overflow will be set and the return will 2814 /// be garbage. Overflow is not cleared on absence of overflow. 2815 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2816 // We use the multiplicative formula: 2817 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2818 // At each iteration, we take the n-th term of the numeral and divide by the 2819 // (k-n)th term of the denominator. This division will always produce an 2820 // integral result, and helps reduce the chance of overflow in the 2821 // intermediate computations. However, we can still overflow even when the 2822 // final result would fit. 2823 2824 if (n == 0 || n == k) return 1; 2825 if (k > n) return 0; 2826 2827 if (k > n/2) 2828 k = n-k; 2829 2830 uint64_t r = 1; 2831 for (uint64_t i = 1; i <= k; ++i) { 2832 r = umul_ov(r, n-(i-1), Overflow); 2833 r /= i; 2834 } 2835 return r; 2836 } 2837 2838 /// Determine if any of the operands in this SCEV are a constant or if 2839 /// any of the add or multiply expressions in this SCEV contain a constant. 2840 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2841 struct FindConstantInAddMulChain { 2842 bool FoundConstant = false; 2843 2844 bool follow(const SCEV *S) { 2845 FoundConstant |= isa<SCEVConstant>(S); 2846 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2847 } 2848 2849 bool isDone() const { 2850 return FoundConstant; 2851 } 2852 }; 2853 2854 FindConstantInAddMulChain F; 2855 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2856 ST.visitAll(StartExpr); 2857 return F.FoundConstant; 2858 } 2859 2860 /// Get a canonical multiply expression, or something simpler if possible. 2861 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2862 SCEV::NoWrapFlags Flags, 2863 unsigned Depth) { 2864 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2865 "only nuw or nsw allowed"); 2866 assert(!Ops.empty() && "Cannot get empty mul!"); 2867 if (Ops.size() == 1) return Ops[0]; 2868 #ifndef NDEBUG 2869 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2870 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2871 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2872 "SCEVMulExpr operand types don't match!"); 2873 #endif 2874 2875 // Sort by complexity, this groups all similar expression types together. 2876 GroupByComplexity(Ops, &LI, DT); 2877 2878 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2879 2880 // Limit recursion calls depth. 2881 if (Depth > MaxArithDepth) 2882 return getOrCreateMulExpr(Ops, Flags); 2883 2884 // If there are any constants, fold them together. 2885 unsigned Idx = 0; 2886 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2887 2888 if (Ops.size() == 2) 2889 // C1*(C2+V) -> C1*C2 + C1*V 2890 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2891 // If any of Add's ops are Adds or Muls with a constant, apply this 2892 // transformation as well. 2893 // 2894 // TODO: There are some cases where this transformation is not 2895 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2896 // this transformation should be narrowed down. 2897 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2898 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2899 SCEV::FlagAnyWrap, Depth + 1), 2900 getMulExpr(LHSC, Add->getOperand(1), 2901 SCEV::FlagAnyWrap, Depth + 1), 2902 SCEV::FlagAnyWrap, Depth + 1); 2903 2904 ++Idx; 2905 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2906 // We found two constants, fold them together! 2907 ConstantInt *Fold = 2908 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2909 Ops[0] = getConstant(Fold); 2910 Ops.erase(Ops.begin()+1); // Erase the folded element 2911 if (Ops.size() == 1) return Ops[0]; 2912 LHSC = cast<SCEVConstant>(Ops[0]); 2913 } 2914 2915 // If we are left with a constant one being multiplied, strip it off. 2916 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2917 Ops.erase(Ops.begin()); 2918 --Idx; 2919 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2920 // If we have a multiply of zero, it will always be zero. 2921 return Ops[0]; 2922 } else if (Ops[0]->isAllOnesValue()) { 2923 // If we have a mul by -1 of an add, try distributing the -1 among the 2924 // add operands. 2925 if (Ops.size() == 2) { 2926 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2927 SmallVector<const SCEV *, 4> NewOps; 2928 bool AnyFolded = false; 2929 for (const SCEV *AddOp : Add->operands()) { 2930 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2931 Depth + 1); 2932 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2933 NewOps.push_back(Mul); 2934 } 2935 if (AnyFolded) 2936 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2937 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2938 // Negation preserves a recurrence's no self-wrap property. 2939 SmallVector<const SCEV *, 4> Operands; 2940 for (const SCEV *AddRecOp : AddRec->operands()) 2941 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2942 Depth + 1)); 2943 2944 return getAddRecExpr(Operands, AddRec->getLoop(), 2945 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2946 } 2947 } 2948 } 2949 2950 if (Ops.size() == 1) 2951 return Ops[0]; 2952 } 2953 2954 // Skip over the add expression until we get to a multiply. 2955 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2956 ++Idx; 2957 2958 // If there are mul operands inline them all into this expression. 2959 if (Idx < Ops.size()) { 2960 bool DeletedMul = false; 2961 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2962 if (Ops.size() > MulOpsInlineThreshold) 2963 break; 2964 // If we have an mul, expand the mul operands onto the end of the 2965 // operands list. 2966 Ops.erase(Ops.begin()+Idx); 2967 Ops.append(Mul->op_begin(), Mul->op_end()); 2968 DeletedMul = true; 2969 } 2970 2971 // If we deleted at least one mul, we added operands to the end of the 2972 // list, and they are not necessarily sorted. Recurse to resort and 2973 // resimplify any operands we just acquired. 2974 if (DeletedMul) 2975 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2976 } 2977 2978 // If there are any add recurrences in the operands list, see if any other 2979 // added values are loop invariant. If so, we can fold them into the 2980 // recurrence. 2981 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2982 ++Idx; 2983 2984 // Scan over all recurrences, trying to fold loop invariants into them. 2985 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2986 // Scan all of the other operands to this mul and add them to the vector 2987 // if they are loop invariant w.r.t. the recurrence. 2988 SmallVector<const SCEV *, 8> LIOps; 2989 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2990 const Loop *AddRecLoop = AddRec->getLoop(); 2991 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2992 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2993 LIOps.push_back(Ops[i]); 2994 Ops.erase(Ops.begin()+i); 2995 --i; --e; 2996 } 2997 2998 // If we found some loop invariants, fold them into the recurrence. 2999 if (!LIOps.empty()) { 3000 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3001 SmallVector<const SCEV *, 4> NewOps; 3002 NewOps.reserve(AddRec->getNumOperands()); 3003 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3004 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3005 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3006 SCEV::FlagAnyWrap, Depth + 1)); 3007 3008 // Build the new addrec. Propagate the NUW and NSW flags if both the 3009 // outer mul and the inner addrec are guaranteed to have no overflow. 3010 // 3011 // No self-wrap cannot be guaranteed after changing the step size, but 3012 // will be inferred if either NUW or NSW is true. 3013 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 3014 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 3015 3016 // If all of the other operands were loop invariant, we are done. 3017 if (Ops.size() == 1) return NewRec; 3018 3019 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3020 for (unsigned i = 0;; ++i) 3021 if (Ops[i] == AddRec) { 3022 Ops[i] = NewRec; 3023 break; 3024 } 3025 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3026 } 3027 3028 // Okay, if there weren't any loop invariants to be folded, check to see 3029 // if there are multiple AddRec's with the same loop induction variable 3030 // being multiplied together. If so, we can fold them. 3031 3032 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3033 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3034 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3035 // ]]],+,...up to x=2n}. 3036 // Note that the arguments to choose() are always integers with values 3037 // known at compile time, never SCEV objects. 3038 // 3039 // The implementation avoids pointless extra computations when the two 3040 // addrec's are of different length (mathematically, it's equivalent to 3041 // an infinite stream of zeros on the right). 3042 bool OpsModified = false; 3043 for (unsigned OtherIdx = Idx+1; 3044 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3045 ++OtherIdx) { 3046 const SCEVAddRecExpr *OtherAddRec = 3047 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3048 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3049 continue; 3050 3051 // Limit max number of arguments to avoid creation of unreasonably big 3052 // SCEVAddRecs with very complex operands. 3053 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 3054 MaxAddRecSize) 3055 continue; 3056 3057 bool Overflow = false; 3058 Type *Ty = AddRec->getType(); 3059 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 3060 SmallVector<const SCEV*, 7> AddRecOps; 3061 for (int x = 0, xe = AddRec->getNumOperands() + 3062 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 3063 SmallVector <const SCEV *, 7> SumOps; 3064 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 3065 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 3066 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 3067 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 3068 z < ze && !Overflow; ++z) { 3069 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 3070 uint64_t Coeff; 3071 if (LargerThan64Bits) 3072 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 3073 else 3074 Coeff = Coeff1*Coeff2; 3075 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 3076 const SCEV *Term1 = AddRec->getOperand(y-z); 3077 const SCEV *Term2 = OtherAddRec->getOperand(z); 3078 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, 3079 SCEV::FlagAnyWrap, Depth + 1)); 3080 } 3081 } 3082 if (SumOps.empty()) 3083 SumOps.push_back(getZero(Ty)); 3084 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); 3085 } 3086 if (!Overflow) { 3087 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(), 3088 SCEV::FlagAnyWrap); 3089 if (Ops.size() == 2) return NewAddRec; 3090 Ops[Idx] = NewAddRec; 3091 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 3092 OpsModified = true; 3093 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 3094 if (!AddRec) 3095 break; 3096 } 3097 } 3098 if (OpsModified) 3099 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3100 3101 // Otherwise couldn't fold anything into this recurrence. Move onto the 3102 // next one. 3103 } 3104 3105 // Okay, it looks like we really DO need an mul expr. Check to see if we 3106 // already have one, otherwise create a new one. 3107 return getOrCreateMulExpr(Ops, Flags); 3108 } 3109 3110 /// Represents an unsigned remainder expression based on unsigned division. 3111 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, 3112 const SCEV *RHS) { 3113 assert(getEffectiveSCEVType(LHS->getType()) == 3114 getEffectiveSCEVType(RHS->getType()) && 3115 "SCEVURemExpr operand types don't match!"); 3116 3117 // Short-circuit easy cases 3118 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3119 // If constant is one, the result is trivial 3120 if (RHSC->getValue()->isOne()) 3121 return getZero(LHS->getType()); // X urem 1 --> 0 3122 3123 // If constant is a power of two, fold into a zext(trunc(LHS)). 3124 if (RHSC->getAPInt().isPowerOf2()) { 3125 Type *FullTy = LHS->getType(); 3126 Type *TruncTy = 3127 IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); 3128 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); 3129 } 3130 } 3131 3132 // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) 3133 const SCEV *UDiv = getUDivExpr(LHS, RHS); 3134 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); 3135 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); 3136 } 3137 3138 /// Get a canonical unsigned division expression, or something simpler if 3139 /// possible. 3140 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 3141 const SCEV *RHS) { 3142 assert(getEffectiveSCEVType(LHS->getType()) == 3143 getEffectiveSCEVType(RHS->getType()) && 3144 "SCEVUDivExpr operand types don't match!"); 3145 3146 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3147 if (RHSC->getValue()->isOne()) 3148 return LHS; // X udiv 1 --> x 3149 // If the denominator is zero, the result of the udiv is undefined. Don't 3150 // try to analyze it, because the resolution chosen here may differ from 3151 // the resolution chosen in other parts of the compiler. 3152 if (!RHSC->getValue()->isZero()) { 3153 // Determine if the division can be folded into the operands of 3154 // its operands. 3155 // TODO: Generalize this to non-constants by using known-bits information. 3156 Type *Ty = LHS->getType(); 3157 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 3158 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 3159 // For non-power-of-two values, effectively round the value up to the 3160 // nearest power of two. 3161 if (!RHSC->getAPInt().isPowerOf2()) 3162 ++MaxShiftAmt; 3163 IntegerType *ExtTy = 3164 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 3165 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 3166 if (const SCEVConstant *Step = 3167 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 3168 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 3169 const APInt &StepInt = Step->getAPInt(); 3170 const APInt &DivInt = RHSC->getAPInt(); 3171 if (!StepInt.urem(DivInt) && 3172 getZeroExtendExpr(AR, ExtTy) == 3173 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3174 getZeroExtendExpr(Step, ExtTy), 3175 AR->getLoop(), SCEV::FlagAnyWrap)) { 3176 SmallVector<const SCEV *, 4> Operands; 3177 for (const SCEV *Op : AR->operands()) 3178 Operands.push_back(getUDivExpr(Op, RHS)); 3179 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 3180 } 3181 /// Get a canonical UDivExpr for a recurrence. 3182 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 3183 // We can currently only fold X%N if X is constant. 3184 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3185 if (StartC && !DivInt.urem(StepInt) && 3186 getZeroExtendExpr(AR, ExtTy) == 3187 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3188 getZeroExtendExpr(Step, ExtTy), 3189 AR->getLoop(), SCEV::FlagAnyWrap)) { 3190 const APInt &StartInt = StartC->getAPInt(); 3191 const APInt &StartRem = StartInt.urem(StepInt); 3192 if (StartRem != 0) 3193 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step, 3194 AR->getLoop(), SCEV::FlagNW); 3195 } 3196 } 3197 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 3198 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3199 SmallVector<const SCEV *, 4> Operands; 3200 for (const SCEV *Op : M->operands()) 3201 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3202 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3203 // Find an operand that's safely divisible. 3204 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3205 const SCEV *Op = M->getOperand(i); 3206 const SCEV *Div = getUDivExpr(Op, RHSC); 3207 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3208 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3209 M->op_end()); 3210 Operands[i] = Div; 3211 return getMulExpr(Operands); 3212 } 3213 } 3214 } 3215 3216 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3217 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3218 if (auto *DivisorConstant = 3219 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3220 bool Overflow = false; 3221 APInt NewRHS = 3222 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3223 if (Overflow) { 3224 return getConstant(RHSC->getType(), 0, false); 3225 } 3226 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3227 } 3228 } 3229 3230 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3231 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3232 SmallVector<const SCEV *, 4> Operands; 3233 for (const SCEV *Op : A->operands()) 3234 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3235 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3236 Operands.clear(); 3237 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3238 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3239 if (isa<SCEVUDivExpr>(Op) || 3240 getMulExpr(Op, RHS) != A->getOperand(i)) 3241 break; 3242 Operands.push_back(Op); 3243 } 3244 if (Operands.size() == A->getNumOperands()) 3245 return getAddExpr(Operands); 3246 } 3247 } 3248 3249 // Fold if both operands are constant. 3250 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3251 Constant *LHSCV = LHSC->getValue(); 3252 Constant *RHSCV = RHSC->getValue(); 3253 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3254 RHSCV))); 3255 } 3256 } 3257 } 3258 3259 FoldingSetNodeID ID; 3260 ID.AddInteger(scUDivExpr); 3261 ID.AddPointer(LHS); 3262 ID.AddPointer(RHS); 3263 void *IP = nullptr; 3264 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3265 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3266 LHS, RHS); 3267 UniqueSCEVs.InsertNode(S, IP); 3268 addToLoopUseLists(S); 3269 return S; 3270 } 3271 3272 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3273 APInt A = C1->getAPInt().abs(); 3274 APInt B = C2->getAPInt().abs(); 3275 uint32_t ABW = A.getBitWidth(); 3276 uint32_t BBW = B.getBitWidth(); 3277 3278 if (ABW > BBW) 3279 B = B.zext(ABW); 3280 else if (ABW < BBW) 3281 A = A.zext(BBW); 3282 3283 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3284 } 3285 3286 /// Get a canonical unsigned division expression, or something simpler if 3287 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3288 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3289 /// it's not exact because the udiv may be clearing bits. 3290 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3291 const SCEV *RHS) { 3292 // TODO: we could try to find factors in all sorts of things, but for now we 3293 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3294 // end of this file for inspiration. 3295 3296 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3297 if (!Mul || !Mul->hasNoUnsignedWrap()) 3298 return getUDivExpr(LHS, RHS); 3299 3300 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3301 // If the mulexpr multiplies by a constant, then that constant must be the 3302 // first element of the mulexpr. 3303 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3304 if (LHSCst == RHSCst) { 3305 SmallVector<const SCEV *, 2> Operands; 3306 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3307 return getMulExpr(Operands); 3308 } 3309 3310 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3311 // that there's a factor provided by one of the other terms. We need to 3312 // check. 3313 APInt Factor = gcd(LHSCst, RHSCst); 3314 if (!Factor.isIntN(1)) { 3315 LHSCst = 3316 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3317 RHSCst = 3318 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3319 SmallVector<const SCEV *, 2> Operands; 3320 Operands.push_back(LHSCst); 3321 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3322 LHS = getMulExpr(Operands); 3323 RHS = RHSCst; 3324 Mul = dyn_cast<SCEVMulExpr>(LHS); 3325 if (!Mul) 3326 return getUDivExactExpr(LHS, RHS); 3327 } 3328 } 3329 } 3330 3331 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3332 if (Mul->getOperand(i) == RHS) { 3333 SmallVector<const SCEV *, 2> Operands; 3334 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3335 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3336 return getMulExpr(Operands); 3337 } 3338 } 3339 3340 return getUDivExpr(LHS, RHS); 3341 } 3342 3343 /// Get an add recurrence expression for the specified loop. Simplify the 3344 /// expression as much as possible. 3345 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3346 const Loop *L, 3347 SCEV::NoWrapFlags Flags) { 3348 SmallVector<const SCEV *, 4> Operands; 3349 Operands.push_back(Start); 3350 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3351 if (StepChrec->getLoop() == L) { 3352 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3353 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3354 } 3355 3356 Operands.push_back(Step); 3357 return getAddRecExpr(Operands, L, Flags); 3358 } 3359 3360 /// Get an add recurrence expression for the specified loop. Simplify the 3361 /// expression as much as possible. 3362 const SCEV * 3363 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 3364 const Loop *L, SCEV::NoWrapFlags Flags) { 3365 if (Operands.size() == 1) return Operands[0]; 3366 #ifndef NDEBUG 3367 Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 3368 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 3369 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 3370 "SCEVAddRecExpr operand types don't match!"); 3371 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3372 assert(isLoopInvariant(Operands[i], L) && 3373 "SCEVAddRecExpr operand is not loop-invariant!"); 3374 #endif 3375 3376 if (Operands.back()->isZero()) { 3377 Operands.pop_back(); 3378 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X 3379 } 3380 3381 // It's tempting to want to call getMaxBackedgeTakenCount count here and 3382 // use that information to infer NUW and NSW flags. However, computing a 3383 // BE count requires calling getAddRecExpr, so we may not yet have a 3384 // meaningful BE count at this point (and if we don't, we'd be stuck 3385 // with a SCEVCouldNotCompute as the cached BE count). 3386 3387 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 3388 3389 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 3390 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3391 const Loop *NestedLoop = NestedAR->getLoop(); 3392 if (L->contains(NestedLoop) 3393 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3394 : (!NestedLoop->contains(L) && 3395 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3396 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3397 NestedAR->op_end()); 3398 Operands[0] = NestedAR->getStart(); 3399 // AddRecs require their operands be loop-invariant with respect to their 3400 // loops. Don't perform this transformation if it would break this 3401 // requirement. 3402 bool AllInvariant = all_of( 3403 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3404 3405 if (AllInvariant) { 3406 // Create a recurrence for the outer loop with the same step size. 3407 // 3408 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3409 // inner recurrence has the same property. 3410 SCEV::NoWrapFlags OuterFlags = 3411 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3412 3413 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3414 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3415 return isLoopInvariant(Op, NestedLoop); 3416 }); 3417 3418 if (AllInvariant) { 3419 // Ok, both add recurrences are valid after the transformation. 3420 // 3421 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3422 // the outer recurrence has the same property. 3423 SCEV::NoWrapFlags InnerFlags = 3424 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3425 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3426 } 3427 } 3428 // Reset Operands to its original state. 3429 Operands[0] = NestedAR; 3430 } 3431 } 3432 3433 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3434 // already have one, otherwise create a new one. 3435 return getOrCreateAddRecExpr(Operands, L, Flags); 3436 } 3437 3438 const SCEV * 3439 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3440 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3441 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3442 // getSCEV(Base)->getType() has the same address space as Base->getType() 3443 // because SCEV::getType() preserves the address space. 3444 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3445 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3446 // instruction to its SCEV, because the Instruction may be guarded by control 3447 // flow and the no-overflow bits may not be valid for the expression in any 3448 // context. This can be fixed similarly to how these flags are handled for 3449 // adds. 3450 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3451 : SCEV::FlagAnyWrap; 3452 3453 const SCEV *TotalOffset = getZero(IntPtrTy); 3454 // The array size is unimportant. The first thing we do on CurTy is getting 3455 // its element type. 3456 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3457 for (const SCEV *IndexExpr : IndexExprs) { 3458 // Compute the (potentially symbolic) offset in bytes for this index. 3459 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3460 // For a struct, add the member offset. 3461 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3462 unsigned FieldNo = Index->getZExtValue(); 3463 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3464 3465 // Add the field offset to the running total offset. 3466 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3467 3468 // Update CurTy to the type of the field at Index. 3469 CurTy = STy->getTypeAtIndex(Index); 3470 } else { 3471 // Update CurTy to its element type. 3472 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3473 // For an array, add the element offset, explicitly scaled. 3474 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3475 // Getelementptr indices are signed. 3476 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3477 3478 // Multiply the index by the element size to compute the element offset. 3479 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3480 3481 // Add the element offset to the running total offset. 3482 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3483 } 3484 } 3485 3486 // Add the total offset from all the GEP indices to the base. 3487 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3488 } 3489 3490 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3491 const SCEV *RHS) { 3492 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3493 return getSMaxExpr(Ops); 3494 } 3495 3496 const SCEV * 3497 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3498 assert(!Ops.empty() && "Cannot get empty smax!"); 3499 if (Ops.size() == 1) return Ops[0]; 3500 #ifndef NDEBUG 3501 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3502 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3503 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3504 "SCEVSMaxExpr operand types don't match!"); 3505 #endif 3506 3507 // Sort by complexity, this groups all similar expression types together. 3508 GroupByComplexity(Ops, &LI, DT); 3509 3510 // If there are any constants, fold them together. 3511 unsigned Idx = 0; 3512 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3513 ++Idx; 3514 assert(Idx < Ops.size()); 3515 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3516 // We found two constants, fold them together! 3517 ConstantInt *Fold = ConstantInt::get( 3518 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3519 Ops[0] = getConstant(Fold); 3520 Ops.erase(Ops.begin()+1); // Erase the folded element 3521 if (Ops.size() == 1) return Ops[0]; 3522 LHSC = cast<SCEVConstant>(Ops[0]); 3523 } 3524 3525 // If we are left with a constant minimum-int, strip it off. 3526 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3527 Ops.erase(Ops.begin()); 3528 --Idx; 3529 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3530 // If we have an smax with a constant maximum-int, it will always be 3531 // maximum-int. 3532 return Ops[0]; 3533 } 3534 3535 if (Ops.size() == 1) return Ops[0]; 3536 } 3537 3538 // Find the first SMax 3539 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3540 ++Idx; 3541 3542 // Check to see if one of the operands is an SMax. If so, expand its operands 3543 // onto our operand list, and recurse to simplify. 3544 if (Idx < Ops.size()) { 3545 bool DeletedSMax = false; 3546 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3547 Ops.erase(Ops.begin()+Idx); 3548 Ops.append(SMax->op_begin(), SMax->op_end()); 3549 DeletedSMax = true; 3550 } 3551 3552 if (DeletedSMax) 3553 return getSMaxExpr(Ops); 3554 } 3555 3556 // Okay, check to see if the same value occurs in the operand list twice. If 3557 // so, delete one. Since we sorted the list, these values are required to 3558 // be adjacent. 3559 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3560 // X smax Y smax Y --> X smax Y 3561 // X smax Y --> X, if X is always greater than Y 3562 if (Ops[i] == Ops[i+1] || 3563 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3564 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3565 --i; --e; 3566 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3567 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3568 --i; --e; 3569 } 3570 3571 if (Ops.size() == 1) return Ops[0]; 3572 3573 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3574 3575 // Okay, it looks like we really DO need an smax expr. Check to see if we 3576 // already have one, otherwise create a new one. 3577 FoldingSetNodeID ID; 3578 ID.AddInteger(scSMaxExpr); 3579 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3580 ID.AddPointer(Ops[i]); 3581 void *IP = nullptr; 3582 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3583 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3584 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3585 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 3586 O, Ops.size()); 3587 UniqueSCEVs.InsertNode(S, IP); 3588 addToLoopUseLists(S); 3589 return S; 3590 } 3591 3592 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3593 const SCEV *RHS) { 3594 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3595 return getUMaxExpr(Ops); 3596 } 3597 3598 const SCEV * 3599 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3600 assert(!Ops.empty() && "Cannot get empty umax!"); 3601 if (Ops.size() == 1) return Ops[0]; 3602 #ifndef NDEBUG 3603 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3604 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3605 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3606 "SCEVUMaxExpr operand types don't match!"); 3607 #endif 3608 3609 // Sort by complexity, this groups all similar expression types together. 3610 GroupByComplexity(Ops, &LI, DT); 3611 3612 // If there are any constants, fold them together. 3613 unsigned Idx = 0; 3614 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3615 ++Idx; 3616 assert(Idx < Ops.size()); 3617 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3618 // We found two constants, fold them together! 3619 ConstantInt *Fold = ConstantInt::get( 3620 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3621 Ops[0] = getConstant(Fold); 3622 Ops.erase(Ops.begin()+1); // Erase the folded element 3623 if (Ops.size() == 1) return Ops[0]; 3624 LHSC = cast<SCEVConstant>(Ops[0]); 3625 } 3626 3627 // If we are left with a constant minimum-int, strip it off. 3628 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3629 Ops.erase(Ops.begin()); 3630 --Idx; 3631 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3632 // If we have an umax with a constant maximum-int, it will always be 3633 // maximum-int. 3634 return Ops[0]; 3635 } 3636 3637 if (Ops.size() == 1) return Ops[0]; 3638 } 3639 3640 // Find the first UMax 3641 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3642 ++Idx; 3643 3644 // Check to see if one of the operands is a UMax. If so, expand its operands 3645 // onto our operand list, and recurse to simplify. 3646 if (Idx < Ops.size()) { 3647 bool DeletedUMax = false; 3648 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3649 Ops.erase(Ops.begin()+Idx); 3650 Ops.append(UMax->op_begin(), UMax->op_end()); 3651 DeletedUMax = true; 3652 } 3653 3654 if (DeletedUMax) 3655 return getUMaxExpr(Ops); 3656 } 3657 3658 // Okay, check to see if the same value occurs in the operand list twice. If 3659 // so, delete one. Since we sorted the list, these values are required to 3660 // be adjacent. 3661 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3662 // X umax Y umax Y --> X umax Y 3663 // X umax Y --> X, if X is always greater than Y 3664 if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning( 3665 ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) { 3666 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3667 --i; --e; 3668 } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i], 3669 Ops[i + 1])) { 3670 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3671 --i; --e; 3672 } 3673 3674 if (Ops.size() == 1) return Ops[0]; 3675 3676 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3677 3678 // Okay, it looks like we really DO need a umax expr. Check to see if we 3679 // already have one, otherwise create a new one. 3680 FoldingSetNodeID ID; 3681 ID.AddInteger(scUMaxExpr); 3682 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3683 ID.AddPointer(Ops[i]); 3684 void *IP = nullptr; 3685 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3686 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3687 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3688 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3689 O, Ops.size()); 3690 UniqueSCEVs.InsertNode(S, IP); 3691 addToLoopUseLists(S); 3692 return S; 3693 } 3694 3695 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3696 const SCEV *RHS) { 3697 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3698 return getSMinExpr(Ops); 3699 } 3700 3701 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3702 // ~smax(~x, ~y, ~z) == smin(x, y, z). 3703 SmallVector<const SCEV *, 2> NotOps; 3704 for (auto *S : Ops) 3705 NotOps.push_back(getNotSCEV(S)); 3706 return getNotSCEV(getSMaxExpr(NotOps)); 3707 } 3708 3709 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3710 const SCEV *RHS) { 3711 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3712 return getUMinExpr(Ops); 3713 } 3714 3715 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3716 assert(!Ops.empty() && "At least one operand must be!"); 3717 // Trivial case. 3718 if (Ops.size() == 1) 3719 return Ops[0]; 3720 3721 // ~umax(~x, ~y, ~z) == umin(x, y, z). 3722 SmallVector<const SCEV *, 2> NotOps; 3723 for (auto *S : Ops) 3724 NotOps.push_back(getNotSCEV(S)); 3725 return getNotSCEV(getUMaxExpr(NotOps)); 3726 } 3727 3728 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3729 // We can bypass creating a target-independent 3730 // constant expression and then folding it back into a ConstantInt. 3731 // This is just a compile-time optimization. 3732 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3733 } 3734 3735 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3736 StructType *STy, 3737 unsigned FieldNo) { 3738 // We can bypass creating a target-independent 3739 // constant expression and then folding it back into a ConstantInt. 3740 // This is just a compile-time optimization. 3741 return getConstant( 3742 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3743 } 3744 3745 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3746 // Don't attempt to do anything other than create a SCEVUnknown object 3747 // here. createSCEV only calls getUnknown after checking for all other 3748 // interesting possibilities, and any other code that calls getUnknown 3749 // is doing so in order to hide a value from SCEV canonicalization. 3750 3751 FoldingSetNodeID ID; 3752 ID.AddInteger(scUnknown); 3753 ID.AddPointer(V); 3754 void *IP = nullptr; 3755 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3756 assert(cast<SCEVUnknown>(S)->getValue() == V && 3757 "Stale SCEVUnknown in uniquing map!"); 3758 return S; 3759 } 3760 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3761 FirstUnknown); 3762 FirstUnknown = cast<SCEVUnknown>(S); 3763 UniqueSCEVs.InsertNode(S, IP); 3764 return S; 3765 } 3766 3767 //===----------------------------------------------------------------------===// 3768 // Basic SCEV Analysis and PHI Idiom Recognition Code 3769 // 3770 3771 /// Test if values of the given type are analyzable within the SCEV 3772 /// framework. This primarily includes integer types, and it can optionally 3773 /// include pointer types if the ScalarEvolution class has access to 3774 /// target-specific information. 3775 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3776 // Integers and pointers are always SCEVable. 3777 return Ty->isIntOrPtrTy(); 3778 } 3779 3780 /// Return the size in bits of the specified type, for which isSCEVable must 3781 /// return true. 3782 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3783 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3784 if (Ty->isPointerTy()) 3785 return getDataLayout().getIndexTypeSizeInBits(Ty); 3786 return getDataLayout().getTypeSizeInBits(Ty); 3787 } 3788 3789 /// Return a type with the same bitwidth as the given type and which represents 3790 /// how SCEV will treat the given type, for which isSCEVable must return 3791 /// true. For pointer types, this is the pointer-sized integer type. 3792 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3793 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3794 3795 if (Ty->isIntegerTy()) 3796 return Ty; 3797 3798 // The only other support type is pointer. 3799 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3800 return getDataLayout().getIntPtrType(Ty); 3801 } 3802 3803 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3804 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; 3805 } 3806 3807 const SCEV *ScalarEvolution::getCouldNotCompute() { 3808 return CouldNotCompute.get(); 3809 } 3810 3811 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3812 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3813 auto *SU = dyn_cast<SCEVUnknown>(S); 3814 return SU && SU->getValue() == nullptr; 3815 }); 3816 3817 return !ContainsNulls; 3818 } 3819 3820 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3821 HasRecMapType::iterator I = HasRecMap.find(S); 3822 if (I != HasRecMap.end()) 3823 return I->second; 3824 3825 bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>); 3826 HasRecMap.insert({S, FoundAddRec}); 3827 return FoundAddRec; 3828 } 3829 3830 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3831 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3832 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3833 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3834 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3835 if (!Add) 3836 return {S, nullptr}; 3837 3838 if (Add->getNumOperands() != 2) 3839 return {S, nullptr}; 3840 3841 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3842 if (!ConstOp) 3843 return {S, nullptr}; 3844 3845 return {Add->getOperand(1), ConstOp->getValue()}; 3846 } 3847 3848 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3849 /// by the value and offset from any ValueOffsetPair in the set. 3850 SetVector<ScalarEvolution::ValueOffsetPair> * 3851 ScalarEvolution::getSCEVValues(const SCEV *S) { 3852 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3853 if (SI == ExprValueMap.end()) 3854 return nullptr; 3855 #ifndef NDEBUG 3856 if (VerifySCEVMap) { 3857 // Check there is no dangling Value in the set returned. 3858 for (const auto &VE : SI->second) 3859 assert(ValueExprMap.count(VE.first)); 3860 } 3861 #endif 3862 return &SI->second; 3863 } 3864 3865 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3866 /// cannot be used separately. eraseValueFromMap should be used to remove 3867 /// V from ValueExprMap and ExprValueMap at the same time. 3868 void ScalarEvolution::eraseValueFromMap(Value *V) { 3869 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3870 if (I != ValueExprMap.end()) { 3871 const SCEV *S = I->second; 3872 // Remove {V, 0} from the set of ExprValueMap[S] 3873 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3874 SV->remove({V, nullptr}); 3875 3876 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3877 const SCEV *Stripped; 3878 ConstantInt *Offset; 3879 std::tie(Stripped, Offset) = splitAddExpr(S); 3880 if (Offset != nullptr) { 3881 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3882 SV->remove({V, Offset}); 3883 } 3884 ValueExprMap.erase(V); 3885 } 3886 } 3887 3888 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3889 /// TODO: In reality it is better to check the poison recursevely 3890 /// but this is better than nothing. 3891 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { 3892 if (auto *I = dyn_cast<Instruction>(V)) { 3893 if (isa<OverflowingBinaryOperator>(I)) { 3894 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { 3895 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) 3896 return true; 3897 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) 3898 return true; 3899 } 3900 } else if (isa<PossiblyExactOperator>(I) && I->isExact()) 3901 return true; 3902 } 3903 return false; 3904 } 3905 3906 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3907 /// create a new one. 3908 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3909 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3910 3911 const SCEV *S = getExistingSCEV(V); 3912 if (S == nullptr) { 3913 S = createSCEV(V); 3914 // During PHI resolution, it is possible to create two SCEVs for the same 3915 // V, so it is needed to double check whether V->S is inserted into 3916 // ValueExprMap before insert S->{V, 0} into ExprValueMap. 3917 std::pair<ValueExprMapType::iterator, bool> Pair = 3918 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3919 if (Pair.second && !SCEVLostPoisonFlags(S, V)) { 3920 ExprValueMap[S].insert({V, nullptr}); 3921 3922 // If S == Stripped + Offset, add Stripped -> {V, Offset} into 3923 // ExprValueMap. 3924 const SCEV *Stripped = S; 3925 ConstantInt *Offset = nullptr; 3926 std::tie(Stripped, Offset) = splitAddExpr(S); 3927 // If stripped is SCEVUnknown, don't bother to save 3928 // Stripped -> {V, offset}. It doesn't simplify and sometimes even 3929 // increase the complexity of the expansion code. 3930 // If V is GetElementPtrInst, don't save Stripped -> {V, offset} 3931 // because it may generate add/sub instead of GEP in SCEV expansion. 3932 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3933 !isa<GetElementPtrInst>(V)) 3934 ExprValueMap[Stripped].insert({V, Offset}); 3935 } 3936 } 3937 return S; 3938 } 3939 3940 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3941 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3942 3943 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3944 if (I != ValueExprMap.end()) { 3945 const SCEV *S = I->second; 3946 if (checkValidity(S)) 3947 return S; 3948 eraseValueFromMap(V); 3949 forgetMemoizedResults(S); 3950 } 3951 return nullptr; 3952 } 3953 3954 /// Return a SCEV corresponding to -V = -1*V 3955 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3956 SCEV::NoWrapFlags Flags) { 3957 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3958 return getConstant( 3959 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3960 3961 Type *Ty = V->getType(); 3962 Ty = getEffectiveSCEVType(Ty); 3963 return getMulExpr( 3964 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3965 } 3966 3967 /// Return a SCEV corresponding to ~V = -1-V 3968 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3969 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3970 return getConstant( 3971 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3972 3973 Type *Ty = V->getType(); 3974 Ty = getEffectiveSCEVType(Ty); 3975 const SCEV *AllOnes = 3976 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 3977 return getMinusSCEV(AllOnes, V); 3978 } 3979 3980 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3981 SCEV::NoWrapFlags Flags, 3982 unsigned Depth) { 3983 // Fast path: X - X --> 0. 3984 if (LHS == RHS) 3985 return getZero(LHS->getType()); 3986 3987 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3988 // makes it so that we cannot make much use of NUW. 3989 auto AddFlags = SCEV::FlagAnyWrap; 3990 const bool RHSIsNotMinSigned = 3991 !getSignedRangeMin(RHS).isMinSignedValue(); 3992 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3993 // Let M be the minimum representable signed value. Then (-1)*RHS 3994 // signed-wraps if and only if RHS is M. That can happen even for 3995 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3996 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3997 // (-1)*RHS, we need to prove that RHS != M. 3998 // 3999 // If LHS is non-negative and we know that LHS - RHS does not 4000 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4001 // either by proving that RHS > M or that LHS >= 0. 4002 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4003 AddFlags = SCEV::FlagNSW; 4004 } 4005 } 4006 4007 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4008 // RHS is NSW and LHS >= 0. 4009 // 4010 // The difficulty here is that the NSW flag may have been proven 4011 // relative to a loop that is to be found in a recurrence in LHS and 4012 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4013 // larger scope than intended. 4014 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 4015 4016 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); 4017 } 4018 4019 const SCEV * 4020 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) { 4021 Type *SrcTy = V->getType(); 4022 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4023 "Cannot truncate or zero extend with non-integer arguments!"); 4024 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4025 return V; // No conversion 4026 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 4027 return getTruncateExpr(V, Ty); 4028 return getZeroExtendExpr(V, Ty); 4029 } 4030 4031 const SCEV * 4032 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 4033 Type *Ty) { 4034 Type *SrcTy = V->getType(); 4035 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4036 "Cannot truncate or zero extend with non-integer arguments!"); 4037 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4038 return V; // No conversion 4039 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 4040 return getTruncateExpr(V, Ty); 4041 return getSignExtendExpr(V, Ty); 4042 } 4043 4044 const SCEV * 4045 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 4046 Type *SrcTy = V->getType(); 4047 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4048 "Cannot noop or zero extend with non-integer arguments!"); 4049 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4050 "getNoopOrZeroExtend cannot truncate!"); 4051 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4052 return V; // No conversion 4053 return getZeroExtendExpr(V, Ty); 4054 } 4055 4056 const SCEV * 4057 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 4058 Type *SrcTy = V->getType(); 4059 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4060 "Cannot noop or sign extend with non-integer arguments!"); 4061 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4062 "getNoopOrSignExtend cannot truncate!"); 4063 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4064 return V; // No conversion 4065 return getSignExtendExpr(V, Ty); 4066 } 4067 4068 const SCEV * 4069 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 4070 Type *SrcTy = V->getType(); 4071 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4072 "Cannot noop or any extend with non-integer arguments!"); 4073 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4074 "getNoopOrAnyExtend cannot truncate!"); 4075 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4076 return V; // No conversion 4077 return getAnyExtendExpr(V, Ty); 4078 } 4079 4080 const SCEV * 4081 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 4082 Type *SrcTy = V->getType(); 4083 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4084 "Cannot truncate or noop with non-integer arguments!"); 4085 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4086 "getTruncateOrNoop cannot extend!"); 4087 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4088 return V; // No conversion 4089 return getTruncateExpr(V, Ty); 4090 } 4091 4092 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4093 const SCEV *RHS) { 4094 const SCEV *PromotedLHS = LHS; 4095 const SCEV *PromotedRHS = RHS; 4096 4097 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4098 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4099 else 4100 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4101 4102 return getUMaxExpr(PromotedLHS, PromotedRHS); 4103 } 4104 4105 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4106 const SCEV *RHS) { 4107 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4108 return getUMinFromMismatchedTypes(Ops); 4109 } 4110 4111 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4112 SmallVectorImpl<const SCEV *> &Ops) { 4113 assert(!Ops.empty() && "At least one operand must be!"); 4114 // Trivial case. 4115 if (Ops.size() == 1) 4116 return Ops[0]; 4117 4118 // Find the max type first. 4119 Type *MaxType = nullptr; 4120 for (auto *S : Ops) 4121 if (MaxType) 4122 MaxType = getWiderType(MaxType, S->getType()); 4123 else 4124 MaxType = S->getType(); 4125 4126 // Extend all ops to max type. 4127 SmallVector<const SCEV *, 2> PromotedOps; 4128 for (auto *S : Ops) 4129 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4130 4131 // Generate umin. 4132 return getUMinExpr(PromotedOps); 4133 } 4134 4135 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4136 // A pointer operand may evaluate to a nonpointer expression, such as null. 4137 if (!V->getType()->isPointerTy()) 4138 return V; 4139 4140 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 4141 return getPointerBase(Cast->getOperand()); 4142 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4143 const SCEV *PtrOp = nullptr; 4144 for (const SCEV *NAryOp : NAry->operands()) { 4145 if (NAryOp->getType()->isPointerTy()) { 4146 // Cannot find the base of an expression with multiple pointer operands. 4147 if (PtrOp) 4148 return V; 4149 PtrOp = NAryOp; 4150 } 4151 } 4152 if (!PtrOp) 4153 return V; 4154 return getPointerBase(PtrOp); 4155 } 4156 return V; 4157 } 4158 4159 /// Push users of the given Instruction onto the given Worklist. 4160 static void 4161 PushDefUseChildren(Instruction *I, 4162 SmallVectorImpl<Instruction *> &Worklist) { 4163 // Push the def-use children onto the Worklist stack. 4164 for (User *U : I->users()) 4165 Worklist.push_back(cast<Instruction>(U)); 4166 } 4167 4168 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4169 SmallVector<Instruction *, 16> Worklist; 4170 PushDefUseChildren(PN, Worklist); 4171 4172 SmallPtrSet<Instruction *, 8> Visited; 4173 Visited.insert(PN); 4174 while (!Worklist.empty()) { 4175 Instruction *I = Worklist.pop_back_val(); 4176 if (!Visited.insert(I).second) 4177 continue; 4178 4179 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4180 if (It != ValueExprMap.end()) { 4181 const SCEV *Old = It->second; 4182 4183 // Short-circuit the def-use traversal if the symbolic name 4184 // ceases to appear in expressions. 4185 if (Old != SymName && !hasOperand(Old, SymName)) 4186 continue; 4187 4188 // SCEVUnknown for a PHI either means that it has an unrecognized 4189 // structure, it's a PHI that's in the progress of being computed 4190 // by createNodeForPHI, or it's a single-value PHI. In the first case, 4191 // additional loop trip count information isn't going to change anything. 4192 // In the second case, createNodeForPHI will perform the necessary 4193 // updates on its own when it gets to that point. In the third, we do 4194 // want to forget the SCEVUnknown. 4195 if (!isa<PHINode>(I) || 4196 !isa<SCEVUnknown>(Old) || 4197 (I != PN && Old == SymName)) { 4198 eraseValueFromMap(It->first); 4199 forgetMemoizedResults(Old); 4200 } 4201 } 4202 4203 PushDefUseChildren(I, Worklist); 4204 } 4205 } 4206 4207 namespace { 4208 4209 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4210 /// expression in case its Loop is L. If it is not L then 4211 /// if IgnoreOtherLoops is true then use AddRec itself 4212 /// otherwise rewrite cannot be done. 4213 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4214 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4215 public: 4216 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4217 bool IgnoreOtherLoops = true) { 4218 SCEVInitRewriter Rewriter(L, SE); 4219 const SCEV *Result = Rewriter.visit(S); 4220 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4221 return SE.getCouldNotCompute(); 4222 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4223 ? SE.getCouldNotCompute() 4224 : Result; 4225 } 4226 4227 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4228 if (!SE.isLoopInvariant(Expr, L)) 4229 SeenLoopVariantSCEVUnknown = true; 4230 return Expr; 4231 } 4232 4233 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4234 // Only re-write AddRecExprs for this loop. 4235 if (Expr->getLoop() == L) 4236 return Expr->getStart(); 4237 SeenOtherLoops = true; 4238 return Expr; 4239 } 4240 4241 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4242 4243 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4244 4245 private: 4246 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4247 : SCEVRewriteVisitor(SE), L(L) {} 4248 4249 const Loop *L; 4250 bool SeenLoopVariantSCEVUnknown = false; 4251 bool SeenOtherLoops = false; 4252 }; 4253 4254 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4255 /// increment expression in case its Loop is L. If it is not L then 4256 /// use AddRec itself. 4257 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4258 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4259 public: 4260 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4261 SCEVPostIncRewriter Rewriter(L, SE); 4262 const SCEV *Result = Rewriter.visit(S); 4263 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4264 ? SE.getCouldNotCompute() 4265 : Result; 4266 } 4267 4268 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4269 if (!SE.isLoopInvariant(Expr, L)) 4270 SeenLoopVariantSCEVUnknown = true; 4271 return Expr; 4272 } 4273 4274 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4275 // Only re-write AddRecExprs for this loop. 4276 if (Expr->getLoop() == L) 4277 return Expr->getPostIncExpr(SE); 4278 SeenOtherLoops = true; 4279 return Expr; 4280 } 4281 4282 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4283 4284 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4285 4286 private: 4287 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4288 : SCEVRewriteVisitor(SE), L(L) {} 4289 4290 const Loop *L; 4291 bool SeenLoopVariantSCEVUnknown = false; 4292 bool SeenOtherLoops = false; 4293 }; 4294 4295 /// This class evaluates the compare condition by matching it against the 4296 /// condition of loop latch. If there is a match we assume a true value 4297 /// for the condition while building SCEV nodes. 4298 class SCEVBackedgeConditionFolder 4299 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4300 public: 4301 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4302 ScalarEvolution &SE) { 4303 bool IsPosBECond = false; 4304 Value *BECond = nullptr; 4305 if (BasicBlock *Latch = L->getLoopLatch()) { 4306 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4307 if (BI && BI->isConditional()) { 4308 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4309 "Both outgoing branches should not target same header!"); 4310 BECond = BI->getCondition(); 4311 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4312 } else { 4313 return S; 4314 } 4315 } 4316 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4317 return Rewriter.visit(S); 4318 } 4319 4320 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4321 const SCEV *Result = Expr; 4322 bool InvariantF = SE.isLoopInvariant(Expr, L); 4323 4324 if (!InvariantF) { 4325 Instruction *I = cast<Instruction>(Expr->getValue()); 4326 switch (I->getOpcode()) { 4327 case Instruction::Select: { 4328 SelectInst *SI = cast<SelectInst>(I); 4329 Optional<const SCEV *> Res = 4330 compareWithBackedgeCondition(SI->getCondition()); 4331 if (Res.hasValue()) { 4332 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4333 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4334 } 4335 break; 4336 } 4337 default: { 4338 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4339 if (Res.hasValue()) 4340 Result = Res.getValue(); 4341 break; 4342 } 4343 } 4344 } 4345 return Result; 4346 } 4347 4348 private: 4349 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4350 bool IsPosBECond, ScalarEvolution &SE) 4351 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4352 IsPositiveBECond(IsPosBECond) {} 4353 4354 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4355 4356 const Loop *L; 4357 /// Loop back condition. 4358 Value *BackedgeCond = nullptr; 4359 /// Set to true if loop back is on positive branch condition. 4360 bool IsPositiveBECond; 4361 }; 4362 4363 Optional<const SCEV *> 4364 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4365 4366 // If value matches the backedge condition for loop latch, 4367 // then return a constant evolution node based on loopback 4368 // branch taken. 4369 if (BackedgeCond == IC) 4370 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4371 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4372 return None; 4373 } 4374 4375 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4376 public: 4377 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4378 ScalarEvolution &SE) { 4379 SCEVShiftRewriter Rewriter(L, SE); 4380 const SCEV *Result = Rewriter.visit(S); 4381 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4382 } 4383 4384 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4385 // Only allow AddRecExprs for this loop. 4386 if (!SE.isLoopInvariant(Expr, L)) 4387 Valid = false; 4388 return Expr; 4389 } 4390 4391 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4392 if (Expr->getLoop() == L && Expr->isAffine()) 4393 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4394 Valid = false; 4395 return Expr; 4396 } 4397 4398 bool isValid() { return Valid; } 4399 4400 private: 4401 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4402 : SCEVRewriteVisitor(SE), L(L) {} 4403 4404 const Loop *L; 4405 bool Valid = true; 4406 }; 4407 4408 } // end anonymous namespace 4409 4410 SCEV::NoWrapFlags 4411 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4412 if (!AR->isAffine()) 4413 return SCEV::FlagAnyWrap; 4414 4415 using OBO = OverflowingBinaryOperator; 4416 4417 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4418 4419 if (!AR->hasNoSignedWrap()) { 4420 ConstantRange AddRecRange = getSignedRange(AR); 4421 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4422 4423 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4424 Instruction::Add, IncRange, OBO::NoSignedWrap); 4425 if (NSWRegion.contains(AddRecRange)) 4426 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4427 } 4428 4429 if (!AR->hasNoUnsignedWrap()) { 4430 ConstantRange AddRecRange = getUnsignedRange(AR); 4431 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4432 4433 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4434 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4435 if (NUWRegion.contains(AddRecRange)) 4436 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4437 } 4438 4439 return Result; 4440 } 4441 4442 namespace { 4443 4444 /// Represents an abstract binary operation. This may exist as a 4445 /// normal instruction or constant expression, or may have been 4446 /// derived from an expression tree. 4447 struct BinaryOp { 4448 unsigned Opcode; 4449 Value *LHS; 4450 Value *RHS; 4451 bool IsNSW = false; 4452 bool IsNUW = false; 4453 4454 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4455 /// constant expression. 4456 Operator *Op = nullptr; 4457 4458 explicit BinaryOp(Operator *Op) 4459 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4460 Op(Op) { 4461 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4462 IsNSW = OBO->hasNoSignedWrap(); 4463 IsNUW = OBO->hasNoUnsignedWrap(); 4464 } 4465 } 4466 4467 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4468 bool IsNUW = false) 4469 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} 4470 }; 4471 4472 } // end anonymous namespace 4473 4474 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4475 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4476 auto *Op = dyn_cast<Operator>(V); 4477 if (!Op) 4478 return None; 4479 4480 // Implementation detail: all the cleverness here should happen without 4481 // creating new SCEV expressions -- our caller knowns tricks to avoid creating 4482 // SCEV expressions when possible, and we should not break that. 4483 4484 switch (Op->getOpcode()) { 4485 case Instruction::Add: 4486 case Instruction::Sub: 4487 case Instruction::Mul: 4488 case Instruction::UDiv: 4489 case Instruction::URem: 4490 case Instruction::And: 4491 case Instruction::Or: 4492 case Instruction::AShr: 4493 case Instruction::Shl: 4494 return BinaryOp(Op); 4495 4496 case Instruction::Xor: 4497 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4498 // If the RHS of the xor is a signmask, then this is just an add. 4499 // Instcombine turns add of signmask into xor as a strength reduction step. 4500 if (RHSC->getValue().isSignMask()) 4501 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4502 return BinaryOp(Op); 4503 4504 case Instruction::LShr: 4505 // Turn logical shift right of a constant into a unsigned divide. 4506 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4507 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4508 4509 // If the shift count is not less than the bitwidth, the result of 4510 // the shift is undefined. Don't try to analyze it, because the 4511 // resolution chosen here may differ from the resolution chosen in 4512 // other parts of the compiler. 4513 if (SA->getValue().ult(BitWidth)) { 4514 Constant *X = 4515 ConstantInt::get(SA->getContext(), 4516 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4517 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4518 } 4519 } 4520 return BinaryOp(Op); 4521 4522 case Instruction::ExtractValue: { 4523 auto *EVI = cast<ExtractValueInst>(Op); 4524 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4525 break; 4526 4527 auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand()); 4528 if (!CI) 4529 break; 4530 4531 if (auto *F = CI->getCalledFunction()) 4532 switch (F->getIntrinsicID()) { 4533 case Intrinsic::sadd_with_overflow: 4534 case Intrinsic::uadd_with_overflow: 4535 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT)) 4536 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4537 CI->getArgOperand(1)); 4538 4539 // Now that we know that all uses of the arithmetic-result component of 4540 // CI are guarded by the overflow check, we can go ahead and pretend 4541 // that the arithmetic is non-overflowing. 4542 if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow) 4543 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4544 CI->getArgOperand(1), /* IsNSW = */ true, 4545 /* IsNUW = */ false); 4546 else 4547 return BinaryOp(Instruction::Add, CI->getArgOperand(0), 4548 CI->getArgOperand(1), /* IsNSW = */ false, 4549 /* IsNUW*/ true); 4550 case Intrinsic::ssub_with_overflow: 4551 case Intrinsic::usub_with_overflow: 4552 if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT)) 4553 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4554 CI->getArgOperand(1)); 4555 4556 // The same reasoning as sadd/uadd above. 4557 if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow) 4558 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4559 CI->getArgOperand(1), /* IsNSW = */ true, 4560 /* IsNUW = */ false); 4561 else 4562 return BinaryOp(Instruction::Sub, CI->getArgOperand(0), 4563 CI->getArgOperand(1), /* IsNSW = */ false, 4564 /* IsNUW = */ true); 4565 case Intrinsic::smul_with_overflow: 4566 case Intrinsic::umul_with_overflow: 4567 return BinaryOp(Instruction::Mul, CI->getArgOperand(0), 4568 CI->getArgOperand(1)); 4569 default: 4570 break; 4571 } 4572 break; 4573 } 4574 4575 default: 4576 break; 4577 } 4578 4579 return None; 4580 } 4581 4582 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4583 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4584 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4585 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4586 /// follows one of the following patterns: 4587 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4588 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4589 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4590 /// we return the type of the truncation operation, and indicate whether the 4591 /// truncated type should be treated as signed/unsigned by setting 4592 /// \p Signed to true/false, respectively. 4593 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4594 bool &Signed, ScalarEvolution &SE) { 4595 // The case where Op == SymbolicPHI (that is, with no type conversions on 4596 // the way) is handled by the regular add recurrence creating logic and 4597 // would have already been triggered in createAddRecForPHI. Reaching it here 4598 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4599 // because one of the other operands of the SCEVAddExpr updating this PHI is 4600 // not invariant). 4601 // 4602 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4603 // this case predicates that allow us to prove that Op == SymbolicPHI will 4604 // be added. 4605 if (Op == SymbolicPHI) 4606 return nullptr; 4607 4608 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4609 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4610 if (SourceBits != NewBits) 4611 return nullptr; 4612 4613 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4614 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4615 if (!SExt && !ZExt) 4616 return nullptr; 4617 const SCEVTruncateExpr *Trunc = 4618 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4619 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4620 if (!Trunc) 4621 return nullptr; 4622 const SCEV *X = Trunc->getOperand(); 4623 if (X != SymbolicPHI) 4624 return nullptr; 4625 Signed = SExt != nullptr; 4626 return Trunc->getType(); 4627 } 4628 4629 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4630 if (!PN->getType()->isIntegerTy()) 4631 return nullptr; 4632 const Loop *L = LI.getLoopFor(PN->getParent()); 4633 if (!L || L->getHeader() != PN->getParent()) 4634 return nullptr; 4635 return L; 4636 } 4637 4638 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4639 // computation that updates the phi follows the following pattern: 4640 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4641 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4642 // If so, try to see if it can be rewritten as an AddRecExpr under some 4643 // Predicates. If successful, return them as a pair. Also cache the results 4644 // of the analysis. 4645 // 4646 // Example usage scenario: 4647 // Say the Rewriter is called for the following SCEV: 4648 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4649 // where: 4650 // %X = phi i64 (%Start, %BEValue) 4651 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4652 // and call this function with %SymbolicPHI = %X. 4653 // 4654 // The analysis will find that the value coming around the backedge has 4655 // the following SCEV: 4656 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4657 // Upon concluding that this matches the desired pattern, the function 4658 // will return the pair {NewAddRec, SmallPredsVec} where: 4659 // NewAddRec = {%Start,+,%Step} 4660 // SmallPredsVec = {P1, P2, P3} as follows: 4661 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4662 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4663 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4664 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4665 // under the predicates {P1,P2,P3}. 4666 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4667 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4668 // 4669 // TODO's: 4670 // 4671 // 1) Extend the Induction descriptor to also support inductions that involve 4672 // casts: When needed (namely, when we are called in the context of the 4673 // vectorizer induction analysis), a Set of cast instructions will be 4674 // populated by this method, and provided back to isInductionPHI. This is 4675 // needed to allow the vectorizer to properly record them to be ignored by 4676 // the cost model and to avoid vectorizing them (otherwise these casts, 4677 // which are redundant under the runtime overflow checks, will be 4678 // vectorized, which can be costly). 4679 // 4680 // 2) Support additional induction/PHISCEV patterns: We also want to support 4681 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4682 // after the induction update operation (the induction increment): 4683 // 4684 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4685 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4686 // 4687 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4688 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 4689 // 4690 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4691 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4692 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4693 SmallVector<const SCEVPredicate *, 3> Predicates; 4694 4695 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4696 // return an AddRec expression under some predicate. 4697 4698 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4699 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4700 assert(L && "Expecting an integer loop header phi"); 4701 4702 // The loop may have multiple entrances or multiple exits; we can analyze 4703 // this phi as an addrec if it has a unique entry value and a unique 4704 // backedge value. 4705 Value *BEValueV = nullptr, *StartValueV = nullptr; 4706 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4707 Value *V = PN->getIncomingValue(i); 4708 if (L->contains(PN->getIncomingBlock(i))) { 4709 if (!BEValueV) { 4710 BEValueV = V; 4711 } else if (BEValueV != V) { 4712 BEValueV = nullptr; 4713 break; 4714 } 4715 } else if (!StartValueV) { 4716 StartValueV = V; 4717 } else if (StartValueV != V) { 4718 StartValueV = nullptr; 4719 break; 4720 } 4721 } 4722 if (!BEValueV || !StartValueV) 4723 return None; 4724 4725 const SCEV *BEValue = getSCEV(BEValueV); 4726 4727 // If the value coming around the backedge is an add with the symbolic 4728 // value we just inserted, possibly with casts that we can ignore under 4729 // an appropriate runtime guard, then we found a simple induction variable! 4730 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4731 if (!Add) 4732 return None; 4733 4734 // If there is a single occurrence of the symbolic value, possibly 4735 // casted, replace it with a recurrence. 4736 unsigned FoundIndex = Add->getNumOperands(); 4737 Type *TruncTy = nullptr; 4738 bool Signed; 4739 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4740 if ((TruncTy = 4741 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4742 if (FoundIndex == e) { 4743 FoundIndex = i; 4744 break; 4745 } 4746 4747 if (FoundIndex == Add->getNumOperands()) 4748 return None; 4749 4750 // Create an add with everything but the specified operand. 4751 SmallVector<const SCEV *, 8> Ops; 4752 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4753 if (i != FoundIndex) 4754 Ops.push_back(Add->getOperand(i)); 4755 const SCEV *Accum = getAddExpr(Ops); 4756 4757 // The runtime checks will not be valid if the step amount is 4758 // varying inside the loop. 4759 if (!isLoopInvariant(Accum, L)) 4760 return None; 4761 4762 // *** Part2: Create the predicates 4763 4764 // Analysis was successful: we have a phi-with-cast pattern for which we 4765 // can return an AddRec expression under the following predicates: 4766 // 4767 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4768 // fits within the truncated type (does not overflow) for i = 0 to n-1. 4769 // P2: An Equal predicate that guarantees that 4770 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4771 // P3: An Equal predicate that guarantees that 4772 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4773 // 4774 // As we next prove, the above predicates guarantee that: 4775 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4776 // 4777 // 4778 // More formally, we want to prove that: 4779 // Expr(i+1) = Start + (i+1) * Accum 4780 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4781 // 4782 // Given that: 4783 // 1) Expr(0) = Start 4784 // 2) Expr(1) = Start + Accum 4785 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4786 // 3) Induction hypothesis (step i): 4787 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4788 // 4789 // Proof: 4790 // Expr(i+1) = 4791 // = Start + (i+1)*Accum 4792 // = (Start + i*Accum) + Accum 4793 // = Expr(i) + Accum 4794 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4795 // :: from step i 4796 // 4797 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4798 // 4799 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4800 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4801 // + Accum :: from P3 4802 // 4803 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4804 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4805 // 4806 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4807 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4808 // 4809 // By induction, the same applies to all iterations 1<=i<n: 4810 // 4811 4812 // Create a truncated addrec for which we will add a no overflow check (P1). 4813 const SCEV *StartVal = getSCEV(StartValueV); 4814 const SCEV *PHISCEV = 4815 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4816 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4817 4818 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4819 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4820 // will be constant. 4821 // 4822 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4823 // add P1. 4824 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4825 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4826 Signed ? SCEVWrapPredicate::IncrementNSSW 4827 : SCEVWrapPredicate::IncrementNUSW; 4828 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4829 Predicates.push_back(AddRecPred); 4830 } 4831 4832 // Create the Equal Predicates P2,P3: 4833 4834 // It is possible that the predicates P2 and/or P3 are computable at 4835 // compile time due to StartVal and/or Accum being constants. 4836 // If either one is, then we can check that now and escape if either P2 4837 // or P3 is false. 4838 4839 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4840 // for each of StartVal and Accum 4841 auto getExtendedExpr = [&](const SCEV *Expr, 4842 bool CreateSignExtend) -> const SCEV * { 4843 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4844 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4845 const SCEV *ExtendedExpr = 4846 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType()) 4847 : getZeroExtendExpr(TruncatedExpr, Expr->getType()); 4848 return ExtendedExpr; 4849 }; 4850 4851 // Given: 4852 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy 4853 // = getExtendedExpr(Expr) 4854 // Determine whether the predicate P: Expr == ExtendedExpr 4855 // is known to be false at compile time 4856 auto PredIsKnownFalse = [&](const SCEV *Expr, 4857 const SCEV *ExtendedExpr) -> bool { 4858 return Expr != ExtendedExpr && 4859 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); 4860 }; 4861 4862 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); 4863 if (PredIsKnownFalse(StartVal, StartExtended)) { 4864 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); 4865 return None; 4866 } 4867 4868 // The Step is always Signed (because the overflow checks are either 4869 // NSSW or NUSW) 4870 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); 4871 if (PredIsKnownFalse(Accum, AccumExtended)) { 4872 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); 4873 return None; 4874 } 4875 4876 auto AppendPredicate = [&](const SCEV *Expr, 4877 const SCEV *ExtendedExpr) -> void { 4878 if (Expr != ExtendedExpr && 4879 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { 4880 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); 4881 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); 4882 Predicates.push_back(Pred); 4883 } 4884 }; 4885 4886 AppendPredicate(StartVal, StartExtended); 4887 AppendPredicate(Accum, AccumExtended); 4888 4889 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in 4890 // which the casts had been folded away. The caller can rewrite SymbolicPHI 4891 // into NewAR if it will also add the runtime overflow checks specified in 4892 // Predicates. 4893 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); 4894 4895 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = 4896 std::make_pair(NewAR, Predicates); 4897 // Remember the result of the analysis for this SCEV at this locayyytion. 4898 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 4899 return PredRewrite; 4900 } 4901 4902 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4903 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 4904 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4905 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4906 if (!L) 4907 return None; 4908 4909 // Check to see if we already analyzed this PHI. 4910 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4911 if (I != PredicatedSCEVRewrites.end()) { 4912 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4913 I->second; 4914 // Analysis was done before and failed to create an AddRec: 4915 if (Rewrite.first == SymbolicPHI) 4916 return None; 4917 // Analysis was done before and succeeded to create an AddRec under 4918 // a predicate: 4919 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4920 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4921 return Rewrite; 4922 } 4923 4924 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4925 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4926 4927 // Record in the cache that the analysis failed 4928 if (!Rewrite) { 4929 SmallVector<const SCEVPredicate *, 3> Predicates; 4930 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4931 return None; 4932 } 4933 4934 return Rewrite; 4935 } 4936 4937 // FIXME: This utility is currently required because the Rewriter currently 4938 // does not rewrite this expression: 4939 // {0, +, (sext ix (trunc iy to ix) to iy)} 4940 // into {0, +, %step}, 4941 // even when the following Equal predicate exists: 4942 // "%step == (sext ix (trunc iy to ix) to iy)". 4943 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 4944 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 4945 if (AR1 == AR2) 4946 return true; 4947 4948 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 4949 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 4950 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 4951 return false; 4952 return true; 4953 }; 4954 4955 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 4956 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 4957 return false; 4958 return true; 4959 } 4960 4961 /// A helper function for createAddRecFromPHI to handle simple cases. 4962 /// 4963 /// This function tries to find an AddRec expression for the simplest (yet most 4964 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4965 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4966 /// technique for finding the AddRec expression. 4967 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4968 Value *BEValueV, 4969 Value *StartValueV) { 4970 const Loop *L = LI.getLoopFor(PN->getParent()); 4971 assert(L && L->getHeader() == PN->getParent()); 4972 assert(BEValueV && StartValueV); 4973 4974 auto BO = MatchBinaryOp(BEValueV, DT); 4975 if (!BO) 4976 return nullptr; 4977 4978 if (BO->Opcode != Instruction::Add) 4979 return nullptr; 4980 4981 const SCEV *Accum = nullptr; 4982 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 4983 Accum = getSCEV(BO->RHS); 4984 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 4985 Accum = getSCEV(BO->LHS); 4986 4987 if (!Accum) 4988 return nullptr; 4989 4990 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4991 if (BO->IsNUW) 4992 Flags = setFlags(Flags, SCEV::FlagNUW); 4993 if (BO->IsNSW) 4994 Flags = setFlags(Flags, SCEV::FlagNSW); 4995 4996 const SCEV *StartVal = getSCEV(StartValueV); 4997 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 4998 4999 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5000 5001 // We can add Flags to the post-inc expression only if we 5002 // know that it is *undefined behavior* for BEValueV to 5003 // overflow. 5004 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5005 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5006 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5007 5008 return PHISCEV; 5009 } 5010 5011 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 5012 const Loop *L = LI.getLoopFor(PN->getParent()); 5013 if (!L || L->getHeader() != PN->getParent()) 5014 return nullptr; 5015 5016 // The loop may have multiple entrances or multiple exits; we can analyze 5017 // this phi as an addrec if it has a unique entry value and a unique 5018 // backedge value. 5019 Value *BEValueV = nullptr, *StartValueV = nullptr; 5020 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5021 Value *V = PN->getIncomingValue(i); 5022 if (L->contains(PN->getIncomingBlock(i))) { 5023 if (!BEValueV) { 5024 BEValueV = V; 5025 } else if (BEValueV != V) { 5026 BEValueV = nullptr; 5027 break; 5028 } 5029 } else if (!StartValueV) { 5030 StartValueV = V; 5031 } else if (StartValueV != V) { 5032 StartValueV = nullptr; 5033 break; 5034 } 5035 } 5036 if (!BEValueV || !StartValueV) 5037 return nullptr; 5038 5039 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 5040 "PHI node already processed?"); 5041 5042 // First, try to find AddRec expression without creating a fictituos symbolic 5043 // value for PN. 5044 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 5045 return S; 5046 5047 // Handle PHI node value symbolically. 5048 const SCEV *SymbolicName = getUnknown(PN); 5049 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 5050 5051 // Using this symbolic name for the PHI, analyze the value coming around 5052 // the back-edge. 5053 const SCEV *BEValue = getSCEV(BEValueV); 5054 5055 // NOTE: If BEValue is loop invariant, we know that the PHI node just 5056 // has a special value for the first iteration of the loop. 5057 5058 // If the value coming around the backedge is an add with the symbolic 5059 // value we just inserted, then we found a simple induction variable! 5060 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 5061 // If there is a single occurrence of the symbolic value, replace it 5062 // with a recurrence. 5063 unsigned FoundIndex = Add->getNumOperands(); 5064 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5065 if (Add->getOperand(i) == SymbolicName) 5066 if (FoundIndex == e) { 5067 FoundIndex = i; 5068 break; 5069 } 5070 5071 if (FoundIndex != Add->getNumOperands()) { 5072 // Create an add with everything but the specified operand. 5073 SmallVector<const SCEV *, 8> Ops; 5074 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5075 if (i != FoundIndex) 5076 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 5077 L, *this)); 5078 const SCEV *Accum = getAddExpr(Ops); 5079 5080 // This is not a valid addrec if the step amount is varying each 5081 // loop iteration, but is not itself an addrec in this loop. 5082 if (isLoopInvariant(Accum, L) || 5083 (isa<SCEVAddRecExpr>(Accum) && 5084 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 5085 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5086 5087 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 5088 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 5089 if (BO->IsNUW) 5090 Flags = setFlags(Flags, SCEV::FlagNUW); 5091 if (BO->IsNSW) 5092 Flags = setFlags(Flags, SCEV::FlagNSW); 5093 } 5094 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 5095 // If the increment is an inbounds GEP, then we know the address 5096 // space cannot be wrapped around. We cannot make any guarantee 5097 // about signed or unsigned overflow because pointers are 5098 // unsigned but we may have a negative index from the base 5099 // pointer. We can guarantee that no unsigned wrap occurs if the 5100 // indices form a positive value. 5101 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 5102 Flags = setFlags(Flags, SCEV::FlagNW); 5103 5104 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 5105 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 5106 Flags = setFlags(Flags, SCEV::FlagNUW); 5107 } 5108 5109 // We cannot transfer nuw and nsw flags from subtraction 5110 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 5111 // for instance. 5112 } 5113 5114 const SCEV *StartVal = getSCEV(StartValueV); 5115 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5116 5117 // Okay, for the entire analysis of this edge we assumed the PHI 5118 // to be symbolic. We now need to go back and purge all of the 5119 // entries for the scalars that use the symbolic expression. 5120 forgetSymbolicName(PN, SymbolicName); 5121 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5122 5123 // We can add Flags to the post-inc expression only if we 5124 // know that it is *undefined behavior* for BEValueV to 5125 // overflow. 5126 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5127 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5128 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5129 5130 return PHISCEV; 5131 } 5132 } 5133 } else { 5134 // Otherwise, this could be a loop like this: 5135 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 5136 // In this case, j = {1,+,1} and BEValue is j. 5137 // Because the other in-value of i (0) fits the evolution of BEValue 5138 // i really is an addrec evolution. 5139 // 5140 // We can generalize this saying that i is the shifted value of BEValue 5141 // by one iteration: 5142 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 5143 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 5144 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); 5145 if (Shifted != getCouldNotCompute() && 5146 Start != getCouldNotCompute()) { 5147 const SCEV *StartVal = getSCEV(StartValueV); 5148 if (Start == StartVal) { 5149 // Okay, for the entire analysis of this edge we assumed the PHI 5150 // to be symbolic. We now need to go back and purge all of the 5151 // entries for the scalars that use the symbolic expression. 5152 forgetSymbolicName(PN, SymbolicName); 5153 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 5154 return Shifted; 5155 } 5156 } 5157 } 5158 5159 // Remove the temporary PHI node SCEV that has been inserted while intending 5160 // to create an AddRecExpr for this PHI node. We can not keep this temporary 5161 // as it will prevent later (possibly simpler) SCEV expressions to be added 5162 // to the ValueExprMap. 5163 eraseValueFromMap(PN); 5164 5165 return nullptr; 5166 } 5167 5168 // Checks if the SCEV S is available at BB. S is considered available at BB 5169 // if S can be materialized at BB without introducing a fault. 5170 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 5171 BasicBlock *BB) { 5172 struct CheckAvailable { 5173 bool TraversalDone = false; 5174 bool Available = true; 5175 5176 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 5177 BasicBlock *BB = nullptr; 5178 DominatorTree &DT; 5179 5180 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 5181 : L(L), BB(BB), DT(DT) {} 5182 5183 bool setUnavailable() { 5184 TraversalDone = true; 5185 Available = false; 5186 return false; 5187 } 5188 5189 bool follow(const SCEV *S) { 5190 switch (S->getSCEVType()) { 5191 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend: 5192 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: 5193 // These expressions are available if their operand(s) is/are. 5194 return true; 5195 5196 case scAddRecExpr: { 5197 // We allow add recurrences that are on the loop BB is in, or some 5198 // outer loop. This guarantees availability because the value of the 5199 // add recurrence at BB is simply the "current" value of the induction 5200 // variable. We can relax this in the future; for instance an add 5201 // recurrence on a sibling dominating loop is also available at BB. 5202 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 5203 if (L && (ARLoop == L || ARLoop->contains(L))) 5204 return true; 5205 5206 return setUnavailable(); 5207 } 5208 5209 case scUnknown: { 5210 // For SCEVUnknown, we check for simple dominance. 5211 const auto *SU = cast<SCEVUnknown>(S); 5212 Value *V = SU->getValue(); 5213 5214 if (isa<Argument>(V)) 5215 return false; 5216 5217 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 5218 return false; 5219 5220 return setUnavailable(); 5221 } 5222 5223 case scUDivExpr: 5224 case scCouldNotCompute: 5225 // We do not try to smart about these at all. 5226 return setUnavailable(); 5227 } 5228 llvm_unreachable("switch should be fully covered!"); 5229 } 5230 5231 bool isDone() { return TraversalDone; } 5232 }; 5233 5234 CheckAvailable CA(L, BB, DT); 5235 SCEVTraversal<CheckAvailable> ST(CA); 5236 5237 ST.visitAll(S); 5238 return CA.Available; 5239 } 5240 5241 // Try to match a control flow sequence that branches out at BI and merges back 5242 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5243 // match. 5244 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5245 Value *&C, Value *&LHS, Value *&RHS) { 5246 C = BI->getCondition(); 5247 5248 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5249 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5250 5251 if (!LeftEdge.isSingleEdge()) 5252 return false; 5253 5254 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5255 5256 Use &LeftUse = Merge->getOperandUse(0); 5257 Use &RightUse = Merge->getOperandUse(1); 5258 5259 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5260 LHS = LeftUse; 5261 RHS = RightUse; 5262 return true; 5263 } 5264 5265 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5266 LHS = RightUse; 5267 RHS = LeftUse; 5268 return true; 5269 } 5270 5271 return false; 5272 } 5273 5274 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5275 auto IsReachable = 5276 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5277 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5278 const Loop *L = LI.getLoopFor(PN->getParent()); 5279 5280 // We don't want to break LCSSA, even in a SCEV expression tree. 5281 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5282 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5283 return nullptr; 5284 5285 // Try to match 5286 // 5287 // br %cond, label %left, label %right 5288 // left: 5289 // br label %merge 5290 // right: 5291 // br label %merge 5292 // merge: 5293 // V = phi [ %x, %left ], [ %y, %right ] 5294 // 5295 // as "select %cond, %x, %y" 5296 5297 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5298 assert(IDom && "At least the entry block should dominate PN"); 5299 5300 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5301 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5302 5303 if (BI && BI->isConditional() && 5304 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5305 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5306 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5307 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5308 } 5309 5310 return nullptr; 5311 } 5312 5313 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5314 if (const SCEV *S = createAddRecFromPHI(PN)) 5315 return S; 5316 5317 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5318 return S; 5319 5320 // If the PHI has a single incoming value, follow that value, unless the 5321 // PHI's incoming blocks are in a different loop, in which case doing so 5322 // risks breaking LCSSA form. Instcombine would normally zap these, but 5323 // it doesn't have DominatorTree information, so it may miss cases. 5324 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5325 if (LI.replacementPreservesLCSSAForm(PN, V)) 5326 return getSCEV(V); 5327 5328 // If it's not a loop phi, we can't handle it yet. 5329 return getUnknown(PN); 5330 } 5331 5332 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5333 Value *Cond, 5334 Value *TrueVal, 5335 Value *FalseVal) { 5336 // Handle "constant" branch or select. This can occur for instance when a 5337 // loop pass transforms an inner loop and moves on to process the outer loop. 5338 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5339 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5340 5341 // Try to match some simple smax or umax patterns. 5342 auto *ICI = dyn_cast<ICmpInst>(Cond); 5343 if (!ICI) 5344 return getUnknown(I); 5345 5346 Value *LHS = ICI->getOperand(0); 5347 Value *RHS = ICI->getOperand(1); 5348 5349 switch (ICI->getPredicate()) { 5350 case ICmpInst::ICMP_SLT: 5351 case ICmpInst::ICMP_SLE: 5352 std::swap(LHS, RHS); 5353 LLVM_FALLTHROUGH; 5354 case ICmpInst::ICMP_SGT: 5355 case ICmpInst::ICMP_SGE: 5356 // a >s b ? a+x : b+x -> smax(a, b)+x 5357 // a >s b ? b+x : a+x -> smin(a, b)+x 5358 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5359 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5360 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5361 const SCEV *LA = getSCEV(TrueVal); 5362 const SCEV *RA = getSCEV(FalseVal); 5363 const SCEV *LDiff = getMinusSCEV(LA, LS); 5364 const SCEV *RDiff = getMinusSCEV(RA, RS); 5365 if (LDiff == RDiff) 5366 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5367 LDiff = getMinusSCEV(LA, RS); 5368 RDiff = getMinusSCEV(RA, LS); 5369 if (LDiff == RDiff) 5370 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5371 } 5372 break; 5373 case ICmpInst::ICMP_ULT: 5374 case ICmpInst::ICMP_ULE: 5375 std::swap(LHS, RHS); 5376 LLVM_FALLTHROUGH; 5377 case ICmpInst::ICMP_UGT: 5378 case ICmpInst::ICMP_UGE: 5379 // a >u b ? a+x : b+x -> umax(a, b)+x 5380 // a >u b ? b+x : a+x -> umin(a, b)+x 5381 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5382 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5383 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5384 const SCEV *LA = getSCEV(TrueVal); 5385 const SCEV *RA = getSCEV(FalseVal); 5386 const SCEV *LDiff = getMinusSCEV(LA, LS); 5387 const SCEV *RDiff = getMinusSCEV(RA, RS); 5388 if (LDiff == RDiff) 5389 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5390 LDiff = getMinusSCEV(LA, RS); 5391 RDiff = getMinusSCEV(RA, LS); 5392 if (LDiff == RDiff) 5393 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5394 } 5395 break; 5396 case ICmpInst::ICMP_NE: 5397 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5398 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5399 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5400 const SCEV *One = getOne(I->getType()); 5401 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5402 const SCEV *LA = getSCEV(TrueVal); 5403 const SCEV *RA = getSCEV(FalseVal); 5404 const SCEV *LDiff = getMinusSCEV(LA, LS); 5405 const SCEV *RDiff = getMinusSCEV(RA, One); 5406 if (LDiff == RDiff) 5407 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5408 } 5409 break; 5410 case ICmpInst::ICMP_EQ: 5411 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5412 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5413 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5414 const SCEV *One = getOne(I->getType()); 5415 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5416 const SCEV *LA = getSCEV(TrueVal); 5417 const SCEV *RA = getSCEV(FalseVal); 5418 const SCEV *LDiff = getMinusSCEV(LA, One); 5419 const SCEV *RDiff = getMinusSCEV(RA, LS); 5420 if (LDiff == RDiff) 5421 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5422 } 5423 break; 5424 default: 5425 break; 5426 } 5427 5428 return getUnknown(I); 5429 } 5430 5431 /// Expand GEP instructions into add and multiply operations. This allows them 5432 /// to be analyzed by regular SCEV code. 5433 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5434 // Don't attempt to analyze GEPs over unsized objects. 5435 if (!GEP->getSourceElementType()->isSized()) 5436 return getUnknown(GEP); 5437 5438 SmallVector<const SCEV *, 4> IndexExprs; 5439 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5440 IndexExprs.push_back(getSCEV(*Index)); 5441 return getGEPExpr(GEP, IndexExprs); 5442 } 5443 5444 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5445 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5446 return C->getAPInt().countTrailingZeros(); 5447 5448 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5449 return std::min(GetMinTrailingZeros(T->getOperand()), 5450 (uint32_t)getTypeSizeInBits(T->getType())); 5451 5452 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5453 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5454 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5455 ? getTypeSizeInBits(E->getType()) 5456 : OpRes; 5457 } 5458 5459 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5460 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5461 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5462 ? getTypeSizeInBits(E->getType()) 5463 : OpRes; 5464 } 5465 5466 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5467 // The result is the min of all operands results. 5468 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5469 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5470 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5471 return MinOpRes; 5472 } 5473 5474 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5475 // The result is the sum of all operands results. 5476 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5477 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5478 for (unsigned i = 1, e = M->getNumOperands(); 5479 SumOpRes != BitWidth && i != e; ++i) 5480 SumOpRes = 5481 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5482 return SumOpRes; 5483 } 5484 5485 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5486 // The result is the min of all operands results. 5487 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5488 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5489 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5490 return MinOpRes; 5491 } 5492 5493 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5494 // The result is the min of all operands results. 5495 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5496 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5497 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5498 return MinOpRes; 5499 } 5500 5501 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5502 // The result is the min of all operands results. 5503 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5504 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5505 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5506 return MinOpRes; 5507 } 5508 5509 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5510 // For a SCEVUnknown, ask ValueTracking. 5511 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5512 return Known.countMinTrailingZeros(); 5513 } 5514 5515 // SCEVUDivExpr 5516 return 0; 5517 } 5518 5519 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5520 auto I = MinTrailingZerosCache.find(S); 5521 if (I != MinTrailingZerosCache.end()) 5522 return I->second; 5523 5524 uint32_t Result = GetMinTrailingZerosImpl(S); 5525 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5526 assert(InsertPair.second && "Should insert a new key"); 5527 return InsertPair.first->second; 5528 } 5529 5530 /// Helper method to assign a range to V from metadata present in the IR. 5531 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5532 if (Instruction *I = dyn_cast<Instruction>(V)) 5533 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5534 return getConstantRangeFromMetadata(*MD); 5535 5536 return None; 5537 } 5538 5539 /// Determine the range for a particular SCEV. If SignHint is 5540 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5541 /// with a "cleaner" unsigned (resp. signed) representation. 5542 const ConstantRange & 5543 ScalarEvolution::getRangeRef(const SCEV *S, 5544 ScalarEvolution::RangeSignHint SignHint) { 5545 DenseMap<const SCEV *, ConstantRange> &Cache = 5546 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5547 : SignedRanges; 5548 5549 // See if we've computed this range already. 5550 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5551 if (I != Cache.end()) 5552 return I->second; 5553 5554 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5555 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5556 5557 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5558 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5559 5560 // If the value has known zeros, the maximum value will have those known zeros 5561 // as well. 5562 uint32_t TZ = GetMinTrailingZeros(S); 5563 if (TZ != 0) { 5564 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5565 ConservativeResult = 5566 ConstantRange(APInt::getMinValue(BitWidth), 5567 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5568 else 5569 ConservativeResult = ConstantRange( 5570 APInt::getSignedMinValue(BitWidth), 5571 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5572 } 5573 5574 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5575 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5576 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5577 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5578 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5579 } 5580 5581 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5582 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5583 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5584 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5585 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5586 } 5587 5588 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5589 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5590 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5591 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5592 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5593 } 5594 5595 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5596 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5597 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5598 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5599 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5600 } 5601 5602 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5603 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5604 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5605 return setRange(UDiv, SignHint, 5606 ConservativeResult.intersectWith(X.udiv(Y))); 5607 } 5608 5609 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5610 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5611 return setRange(ZExt, SignHint, 5612 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5613 } 5614 5615 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5616 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5617 return setRange(SExt, SignHint, 5618 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5619 } 5620 5621 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5622 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5623 return setRange(Trunc, SignHint, 5624 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5625 } 5626 5627 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5628 // If there's no unsigned wrap, the value will never be less than its 5629 // initial value. 5630 if (AddRec->hasNoUnsignedWrap()) 5631 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5632 if (!C->getValue()->isZero()) 5633 ConservativeResult = ConservativeResult.intersectWith( 5634 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5635 5636 // If there's no signed wrap, and all the operands have the same sign or 5637 // zero, the value won't ever change sign. 5638 if (AddRec->hasNoSignedWrap()) { 5639 bool AllNonNeg = true; 5640 bool AllNonPos = true; 5641 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5642 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5643 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5644 } 5645 if (AllNonNeg) 5646 ConservativeResult = ConservativeResult.intersectWith( 5647 ConstantRange(APInt(BitWidth, 0), 5648 APInt::getSignedMinValue(BitWidth))); 5649 else if (AllNonPos) 5650 ConservativeResult = ConservativeResult.intersectWith( 5651 ConstantRange(APInt::getSignedMinValue(BitWidth), 5652 APInt(BitWidth, 1))); 5653 } 5654 5655 // TODO: non-affine addrec 5656 if (AddRec->isAffine()) { 5657 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5658 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5659 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5660 auto RangeFromAffine = getRangeForAffineAR( 5661 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5662 BitWidth); 5663 if (!RangeFromAffine.isFullSet()) 5664 ConservativeResult = 5665 ConservativeResult.intersectWith(RangeFromAffine); 5666 5667 auto RangeFromFactoring = getRangeViaFactoring( 5668 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5669 BitWidth); 5670 if (!RangeFromFactoring.isFullSet()) 5671 ConservativeResult = 5672 ConservativeResult.intersectWith(RangeFromFactoring); 5673 } 5674 } 5675 5676 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5677 } 5678 5679 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5680 // Check if the IR explicitly contains !range metadata. 5681 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5682 if (MDRange.hasValue()) 5683 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5684 5685 // Split here to avoid paying the compile-time cost of calling both 5686 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5687 // if needed. 5688 const DataLayout &DL = getDataLayout(); 5689 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5690 // For a SCEVUnknown, ask ValueTracking. 5691 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5692 if (Known.One != ~Known.Zero + 1) 5693 ConservativeResult = 5694 ConservativeResult.intersectWith(ConstantRange(Known.One, 5695 ~Known.Zero + 1)); 5696 } else { 5697 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5698 "generalize as needed!"); 5699 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5700 if (NS > 1) 5701 ConservativeResult = ConservativeResult.intersectWith( 5702 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5703 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5704 } 5705 5706 // A range of Phi is a subset of union of all ranges of its input. 5707 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5708 // Make sure that we do not run over cycled Phis. 5709 if (PendingPhiRanges.insert(Phi).second) { 5710 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5711 for (auto &Op : Phi->operands()) { 5712 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5713 RangeFromOps = RangeFromOps.unionWith(OpRange); 5714 // No point to continue if we already have a full set. 5715 if (RangeFromOps.isFullSet()) 5716 break; 5717 } 5718 ConservativeResult = ConservativeResult.intersectWith(RangeFromOps); 5719 bool Erased = PendingPhiRanges.erase(Phi); 5720 assert(Erased && "Failed to erase Phi properly?"); 5721 (void) Erased; 5722 } 5723 } 5724 5725 return setRange(U, SignHint, std::move(ConservativeResult)); 5726 } 5727 5728 return setRange(S, SignHint, std::move(ConservativeResult)); 5729 } 5730 5731 // Given a StartRange, Step and MaxBECount for an expression compute a range of 5732 // values that the expression can take. Initially, the expression has a value 5733 // from StartRange and then is changed by Step up to MaxBECount times. Signed 5734 // argument defines if we treat Step as signed or unsigned. 5735 static ConstantRange getRangeForAffineARHelper(APInt Step, 5736 const ConstantRange &StartRange, 5737 const APInt &MaxBECount, 5738 unsigned BitWidth, bool Signed) { 5739 // If either Step or MaxBECount is 0, then the expression won't change, and we 5740 // just need to return the initial range. 5741 if (Step == 0 || MaxBECount == 0) 5742 return StartRange; 5743 5744 // If we don't know anything about the initial value (i.e. StartRange is 5745 // FullRange), then we don't know anything about the final range either. 5746 // Return FullRange. 5747 if (StartRange.isFullSet()) 5748 return ConstantRange(BitWidth, /* isFullSet = */ true); 5749 5750 // If Step is signed and negative, then we use its absolute value, but we also 5751 // note that we're moving in the opposite direction. 5752 bool Descending = Signed && Step.isNegative(); 5753 5754 if (Signed) 5755 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: 5756 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. 5757 // This equations hold true due to the well-defined wrap-around behavior of 5758 // APInt. 5759 Step = Step.abs(); 5760 5761 // Check if Offset is more than full span of BitWidth. If it is, the 5762 // expression is guaranteed to overflow. 5763 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) 5764 return ConstantRange(BitWidth, /* isFullSet = */ true); 5765 5766 // Offset is by how much the expression can change. Checks above guarantee no 5767 // overflow here. 5768 APInt Offset = Step * MaxBECount; 5769 5770 // Minimum value of the final range will match the minimal value of StartRange 5771 // if the expression is increasing and will be decreased by Offset otherwise. 5772 // Maximum value of the final range will match the maximal value of StartRange 5773 // if the expression is decreasing and will be increased by Offset otherwise. 5774 APInt StartLower = StartRange.getLower(); 5775 APInt StartUpper = StartRange.getUpper() - 1; 5776 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5777 : (StartUpper + std::move(Offset)); 5778 5779 // It's possible that the new minimum/maximum value will fall into the initial 5780 // range (due to wrap around). This means that the expression can take any 5781 // value in this bitwidth, and we have to return full range. 5782 if (StartRange.contains(MovedBoundary)) 5783 return ConstantRange(BitWidth, /* isFullSet = */ true); 5784 5785 APInt NewLower = 5786 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5787 APInt NewUpper = 5788 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5789 NewUpper += 1; 5790 5791 // If we end up with full range, return a proper full range. 5792 if (NewLower == NewUpper) 5793 return ConstantRange(BitWidth, /* isFullSet = */ true); 5794 5795 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5796 return ConstantRange(std::move(NewLower), std::move(NewUpper)); 5797 } 5798 5799 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5800 const SCEV *Step, 5801 const SCEV *MaxBECount, 5802 unsigned BitWidth) { 5803 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5804 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5805 "Precondition!"); 5806 5807 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5808 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5809 5810 // First, consider step signed. 5811 ConstantRange StartSRange = getSignedRange(Start); 5812 ConstantRange StepSRange = getSignedRange(Step); 5813 5814 // If Step can be both positive and negative, we need to find ranges for the 5815 // maximum absolute step values in both directions and union them. 5816 ConstantRange SR = 5817 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5818 MaxBECountValue, BitWidth, /* Signed = */ true); 5819 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5820 StartSRange, MaxBECountValue, 5821 BitWidth, /* Signed = */ true)); 5822 5823 // Next, consider step unsigned. 5824 ConstantRange UR = getRangeForAffineARHelper( 5825 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5826 MaxBECountValue, BitWidth, /* Signed = */ false); 5827 5828 // Finally, intersect signed and unsigned ranges. 5829 return SR.intersectWith(UR); 5830 } 5831 5832 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5833 const SCEV *Step, 5834 const SCEV *MaxBECount, 5835 unsigned BitWidth) { 5836 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5837 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5838 5839 struct SelectPattern { 5840 Value *Condition = nullptr; 5841 APInt TrueValue; 5842 APInt FalseValue; 5843 5844 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5845 const SCEV *S) { 5846 Optional<unsigned> CastOp; 5847 APInt Offset(BitWidth, 0); 5848 5849 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5850 "Should be!"); 5851 5852 // Peel off a constant offset: 5853 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5854 // In the future we could consider being smarter here and handle 5855 // {Start+Step,+,Step} too. 5856 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5857 return; 5858 5859 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5860 S = SA->getOperand(1); 5861 } 5862 5863 // Peel off a cast operation 5864 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5865 CastOp = SCast->getSCEVType(); 5866 S = SCast->getOperand(); 5867 } 5868 5869 using namespace llvm::PatternMatch; 5870 5871 auto *SU = dyn_cast<SCEVUnknown>(S); 5872 const APInt *TrueVal, *FalseVal; 5873 if (!SU || 5874 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5875 m_APInt(FalseVal)))) { 5876 Condition = nullptr; 5877 return; 5878 } 5879 5880 TrueValue = *TrueVal; 5881 FalseValue = *FalseVal; 5882 5883 // Re-apply the cast we peeled off earlier 5884 if (CastOp.hasValue()) 5885 switch (*CastOp) { 5886 default: 5887 llvm_unreachable("Unknown SCEV cast type!"); 5888 5889 case scTruncate: 5890 TrueValue = TrueValue.trunc(BitWidth); 5891 FalseValue = FalseValue.trunc(BitWidth); 5892 break; 5893 case scZeroExtend: 5894 TrueValue = TrueValue.zext(BitWidth); 5895 FalseValue = FalseValue.zext(BitWidth); 5896 break; 5897 case scSignExtend: 5898 TrueValue = TrueValue.sext(BitWidth); 5899 FalseValue = FalseValue.sext(BitWidth); 5900 break; 5901 } 5902 5903 // Re-apply the constant offset we peeled off earlier 5904 TrueValue += Offset; 5905 FalseValue += Offset; 5906 } 5907 5908 bool isRecognized() { return Condition != nullptr; } 5909 }; 5910 5911 SelectPattern StartPattern(*this, BitWidth, Start); 5912 if (!StartPattern.isRecognized()) 5913 return ConstantRange(BitWidth, /* isFullSet = */ true); 5914 5915 SelectPattern StepPattern(*this, BitWidth, Step); 5916 if (!StepPattern.isRecognized()) 5917 return ConstantRange(BitWidth, /* isFullSet = */ true); 5918 5919 if (StartPattern.Condition != StepPattern.Condition) { 5920 // We don't handle this case today; but we could, by considering four 5921 // possibilities below instead of two. I'm not sure if there are cases where 5922 // that will help over what getRange already does, though. 5923 return ConstantRange(BitWidth, /* isFullSet = */ true); 5924 } 5925 5926 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5927 // construct arbitrary general SCEV expressions here. This function is called 5928 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5929 // say) can end up caching a suboptimal value. 5930 5931 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5932 // C2352 and C2512 (otherwise it isn't needed). 5933 5934 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5935 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5936 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5937 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5938 5939 ConstantRange TrueRange = 5940 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5941 ConstantRange FalseRange = 5942 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5943 5944 return TrueRange.unionWith(FalseRange); 5945 } 5946 5947 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5948 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5949 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5950 5951 // Return early if there are no flags to propagate to the SCEV. 5952 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5953 if (BinOp->hasNoUnsignedWrap()) 5954 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5955 if (BinOp->hasNoSignedWrap()) 5956 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5957 if (Flags == SCEV::FlagAnyWrap) 5958 return SCEV::FlagAnyWrap; 5959 5960 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5961 } 5962 5963 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5964 // Here we check that I is in the header of the innermost loop containing I, 5965 // since we only deal with instructions in the loop header. The actual loop we 5966 // need to check later will come from an add recurrence, but getting that 5967 // requires computing the SCEV of the operands, which can be expensive. This 5968 // check we can do cheaply to rule out some cases early. 5969 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5970 if (InnermostContainingLoop == nullptr || 5971 InnermostContainingLoop->getHeader() != I->getParent()) 5972 return false; 5973 5974 // Only proceed if we can prove that I does not yield poison. 5975 if (!programUndefinedIfFullPoison(I)) 5976 return false; 5977 5978 // At this point we know that if I is executed, then it does not wrap 5979 // according to at least one of NSW or NUW. If I is not executed, then we do 5980 // not know if the calculation that I represents would wrap. Multiple 5981 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5982 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5983 // derived from other instructions that map to the same SCEV. We cannot make 5984 // that guarantee for cases where I is not executed. So we need to find the 5985 // loop that I is considered in relation to and prove that I is executed for 5986 // every iteration of that loop. That implies that the value that I 5987 // calculates does not wrap anywhere in the loop, so then we can apply the 5988 // flags to the SCEV. 5989 // 5990 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5991 // from different loops, so that we know which loop to prove that I is 5992 // executed in. 5993 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5994 // I could be an extractvalue from a call to an overflow intrinsic. 5995 // TODO: We can do better here in some cases. 5996 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 5997 return false; 5998 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 5999 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6000 bool AllOtherOpsLoopInvariant = true; 6001 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6002 ++OtherOpIndex) { 6003 if (OtherOpIndex != OpIndex) { 6004 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6005 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6006 AllOtherOpsLoopInvariant = false; 6007 break; 6008 } 6009 } 6010 } 6011 if (AllOtherOpsLoopInvariant && 6012 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6013 return true; 6014 } 6015 } 6016 return false; 6017 } 6018 6019 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6020 // If we know that \c I can never be poison period, then that's enough. 6021 if (isSCEVExprNeverPoison(I)) 6022 return true; 6023 6024 // For an add recurrence specifically, we assume that infinite loops without 6025 // side effects are undefined behavior, and then reason as follows: 6026 // 6027 // If the add recurrence is poison in any iteration, it is poison on all 6028 // future iterations (since incrementing poison yields poison). If the result 6029 // of the add recurrence is fed into the loop latch condition and the loop 6030 // does not contain any throws or exiting blocks other than the latch, we now 6031 // have the ability to "choose" whether the backedge is taken or not (by 6032 // choosing a sufficiently evil value for the poison feeding into the branch) 6033 // for every iteration including and after the one in which \p I first became 6034 // poison. There are two possibilities (let's call the iteration in which \p 6035 // I first became poison as K): 6036 // 6037 // 1. In the set of iterations including and after K, the loop body executes 6038 // no side effects. In this case executing the backege an infinte number 6039 // of times will yield undefined behavior. 6040 // 6041 // 2. In the set of iterations including and after K, the loop body executes 6042 // at least one side effect. In this case, that specific instance of side 6043 // effect is control dependent on poison, which also yields undefined 6044 // behavior. 6045 6046 auto *ExitingBB = L->getExitingBlock(); 6047 auto *LatchBB = L->getLoopLatch(); 6048 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB) 6049 return false; 6050 6051 SmallPtrSet<const Instruction *, 16> Pushed; 6052 SmallVector<const Instruction *, 8> PoisonStack; 6053 6054 // We start by assuming \c I, the post-inc add recurrence, is poison. Only 6055 // things that are known to be fully poison under that assumption go on the 6056 // PoisonStack. 6057 Pushed.insert(I); 6058 PoisonStack.push_back(I); 6059 6060 bool LatchControlDependentOnPoison = false; 6061 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 6062 const Instruction *Poison = PoisonStack.pop_back_val(); 6063 6064 for (auto *PoisonUser : Poison->users()) { 6065 if (propagatesFullPoison(cast<Instruction>(PoisonUser))) { 6066 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 6067 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 6068 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 6069 assert(BI->isConditional() && "Only possibility!"); 6070 if (BI->getParent() == LatchBB) { 6071 LatchControlDependentOnPoison = true; 6072 break; 6073 } 6074 } 6075 } 6076 } 6077 6078 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 6079 } 6080 6081 ScalarEvolution::LoopProperties 6082 ScalarEvolution::getLoopProperties(const Loop *L) { 6083 using LoopProperties = ScalarEvolution::LoopProperties; 6084 6085 auto Itr = LoopPropertiesCache.find(L); 6086 if (Itr == LoopPropertiesCache.end()) { 6087 auto HasSideEffects = [](Instruction *I) { 6088 if (auto *SI = dyn_cast<StoreInst>(I)) 6089 return !SI->isSimple(); 6090 6091 return I->mayHaveSideEffects(); 6092 }; 6093 6094 LoopProperties LP = {/* HasNoAbnormalExits */ true, 6095 /*HasNoSideEffects*/ true}; 6096 6097 for (auto *BB : L->getBlocks()) 6098 for (auto &I : *BB) { 6099 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 6100 LP.HasNoAbnormalExits = false; 6101 if (HasSideEffects(&I)) 6102 LP.HasNoSideEffects = false; 6103 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 6104 break; // We're already as pessimistic as we can get. 6105 } 6106 6107 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6108 assert(InsertPair.second && "We just checked!"); 6109 Itr = InsertPair.first; 6110 } 6111 6112 return Itr->second; 6113 } 6114 6115 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6116 if (!isSCEVable(V->getType())) 6117 return getUnknown(V); 6118 6119 if (Instruction *I = dyn_cast<Instruction>(V)) { 6120 // Don't attempt to analyze instructions in blocks that aren't 6121 // reachable. Such instructions don't matter, and they aren't required 6122 // to obey basic rules for definitions dominating uses which this 6123 // analysis depends on. 6124 if (!DT.isReachableFromEntry(I->getParent())) 6125 return getUnknown(V); 6126 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6127 return getConstant(CI); 6128 else if (isa<ConstantPointerNull>(V)) 6129 return getZero(V->getType()); 6130 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6131 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6132 else if (!isa<ConstantExpr>(V)) 6133 return getUnknown(V); 6134 6135 Operator *U = cast<Operator>(V); 6136 if (auto BO = MatchBinaryOp(U, DT)) { 6137 switch (BO->Opcode) { 6138 case Instruction::Add: { 6139 // The simple thing to do would be to just call getSCEV on both operands 6140 // and call getAddExpr with the result. However if we're looking at a 6141 // bunch of things all added together, this can be quite inefficient, 6142 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6143 // Instead, gather up all the operands and make a single getAddExpr call. 6144 // LLVM IR canonical form means we need only traverse the left operands. 6145 SmallVector<const SCEV *, 4> AddOps; 6146 do { 6147 if (BO->Op) { 6148 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6149 AddOps.push_back(OpSCEV); 6150 break; 6151 } 6152 6153 // If a NUW or NSW flag can be applied to the SCEV for this 6154 // addition, then compute the SCEV for this addition by itself 6155 // with a separate call to getAddExpr. We need to do that 6156 // instead of pushing the operands of the addition onto AddOps, 6157 // since the flags are only known to apply to this particular 6158 // addition - they may not apply to other additions that can be 6159 // formed with operands from AddOps. 6160 const SCEV *RHS = getSCEV(BO->RHS); 6161 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6162 if (Flags != SCEV::FlagAnyWrap) { 6163 const SCEV *LHS = getSCEV(BO->LHS); 6164 if (BO->Opcode == Instruction::Sub) 6165 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6166 else 6167 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6168 break; 6169 } 6170 } 6171 6172 if (BO->Opcode == Instruction::Sub) 6173 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6174 else 6175 AddOps.push_back(getSCEV(BO->RHS)); 6176 6177 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6178 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6179 NewBO->Opcode != Instruction::Sub)) { 6180 AddOps.push_back(getSCEV(BO->LHS)); 6181 break; 6182 } 6183 BO = NewBO; 6184 } while (true); 6185 6186 return getAddExpr(AddOps); 6187 } 6188 6189 case Instruction::Mul: { 6190 SmallVector<const SCEV *, 4> MulOps; 6191 do { 6192 if (BO->Op) { 6193 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6194 MulOps.push_back(OpSCEV); 6195 break; 6196 } 6197 6198 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6199 if (Flags != SCEV::FlagAnyWrap) { 6200 MulOps.push_back( 6201 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6202 break; 6203 } 6204 } 6205 6206 MulOps.push_back(getSCEV(BO->RHS)); 6207 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6208 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6209 MulOps.push_back(getSCEV(BO->LHS)); 6210 break; 6211 } 6212 BO = NewBO; 6213 } while (true); 6214 6215 return getMulExpr(MulOps); 6216 } 6217 case Instruction::UDiv: 6218 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6219 case Instruction::URem: 6220 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6221 case Instruction::Sub: { 6222 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6223 if (BO->Op) 6224 Flags = getNoWrapFlagsFromUB(BO->Op); 6225 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6226 } 6227 case Instruction::And: 6228 // For an expression like x&255 that merely masks off the high bits, 6229 // use zext(trunc(x)) as the SCEV expression. 6230 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6231 if (CI->isZero()) 6232 return getSCEV(BO->RHS); 6233 if (CI->isMinusOne()) 6234 return getSCEV(BO->LHS); 6235 const APInt &A = CI->getValue(); 6236 6237 // Instcombine's ShrinkDemandedConstant may strip bits out of 6238 // constants, obscuring what would otherwise be a low-bits mask. 6239 // Use computeKnownBits to compute what ShrinkDemandedConstant 6240 // knew about to reconstruct a low-bits mask value. 6241 unsigned LZ = A.countLeadingZeros(); 6242 unsigned TZ = A.countTrailingZeros(); 6243 unsigned BitWidth = A.getBitWidth(); 6244 KnownBits Known(BitWidth); 6245 computeKnownBits(BO->LHS, Known, getDataLayout(), 6246 0, &AC, nullptr, &DT); 6247 6248 APInt EffectiveMask = 6249 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6250 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6251 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6252 const SCEV *LHS = getSCEV(BO->LHS); 6253 const SCEV *ShiftedLHS = nullptr; 6254 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6255 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6256 // For an expression like (x * 8) & 8, simplify the multiply. 6257 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6258 unsigned GCD = std::min(MulZeros, TZ); 6259 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6260 SmallVector<const SCEV*, 4> MulOps; 6261 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6262 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6263 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6264 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6265 } 6266 } 6267 if (!ShiftedLHS) 6268 ShiftedLHS = getUDivExpr(LHS, MulCount); 6269 return getMulExpr( 6270 getZeroExtendExpr( 6271 getTruncateExpr(ShiftedLHS, 6272 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6273 BO->LHS->getType()), 6274 MulCount); 6275 } 6276 } 6277 break; 6278 6279 case Instruction::Or: 6280 // If the RHS of the Or is a constant, we may have something like: 6281 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6282 // optimizations will transparently handle this case. 6283 // 6284 // In order for this transformation to be safe, the LHS must be of the 6285 // form X*(2^n) and the Or constant must be less than 2^n. 6286 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6287 const SCEV *LHS = getSCEV(BO->LHS); 6288 const APInt &CIVal = CI->getValue(); 6289 if (GetMinTrailingZeros(LHS) >= 6290 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6291 // Build a plain add SCEV. 6292 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6293 // If the LHS of the add was an addrec and it has no-wrap flags, 6294 // transfer the no-wrap flags, since an or won't introduce a wrap. 6295 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6296 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6297 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6298 OldAR->getNoWrapFlags()); 6299 } 6300 return S; 6301 } 6302 } 6303 break; 6304 6305 case Instruction::Xor: 6306 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6307 // If the RHS of xor is -1, then this is a not operation. 6308 if (CI->isMinusOne()) 6309 return getNotSCEV(getSCEV(BO->LHS)); 6310 6311 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6312 // This is a variant of the check for xor with -1, and it handles 6313 // the case where instcombine has trimmed non-demanded bits out 6314 // of an xor with -1. 6315 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6316 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6317 if (LBO->getOpcode() == Instruction::And && 6318 LCI->getValue() == CI->getValue()) 6319 if (const SCEVZeroExtendExpr *Z = 6320 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6321 Type *UTy = BO->LHS->getType(); 6322 const SCEV *Z0 = Z->getOperand(); 6323 Type *Z0Ty = Z0->getType(); 6324 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6325 6326 // If C is a low-bits mask, the zero extend is serving to 6327 // mask off the high bits. Complement the operand and 6328 // re-apply the zext. 6329 if (CI->getValue().isMask(Z0TySize)) 6330 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6331 6332 // If C is a single bit, it may be in the sign-bit position 6333 // before the zero-extend. In this case, represent the xor 6334 // using an add, which is equivalent, and re-apply the zext. 6335 APInt Trunc = CI->getValue().trunc(Z0TySize); 6336 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6337 Trunc.isSignMask()) 6338 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6339 UTy); 6340 } 6341 } 6342 break; 6343 6344 case Instruction::Shl: 6345 // Turn shift left of a constant amount into a multiply. 6346 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6347 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6348 6349 // If the shift count is not less than the bitwidth, the result of 6350 // the shift is undefined. Don't try to analyze it, because the 6351 // resolution chosen here may differ from the resolution chosen in 6352 // other parts of the compiler. 6353 if (SA->getValue().uge(BitWidth)) 6354 break; 6355 6356 // It is currently not resolved how to interpret NSW for left 6357 // shift by BitWidth - 1, so we avoid applying flags in that 6358 // case. Remove this check (or this comment) once the situation 6359 // is resolved. See 6360 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 6361 // and http://reviews.llvm.org/D8890 . 6362 auto Flags = SCEV::FlagAnyWrap; 6363 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 6364 Flags = getNoWrapFlagsFromUB(BO->Op); 6365 6366 Constant *X = ConstantInt::get( 6367 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 6368 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 6369 } 6370 break; 6371 6372 case Instruction::AShr: { 6373 // AShr X, C, where C is a constant. 6374 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS); 6375 if (!CI) 6376 break; 6377 6378 Type *OuterTy = BO->LHS->getType(); 6379 uint64_t BitWidth = getTypeSizeInBits(OuterTy); 6380 // If the shift count is not less than the bitwidth, the result of 6381 // the shift is undefined. Don't try to analyze it, because the 6382 // resolution chosen here may differ from the resolution chosen in 6383 // other parts of the compiler. 6384 if (CI->getValue().uge(BitWidth)) 6385 break; 6386 6387 if (CI->isZero()) 6388 return getSCEV(BO->LHS); // shift by zero --> noop 6389 6390 uint64_t AShrAmt = CI->getZExtValue(); 6391 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt); 6392 6393 Operator *L = dyn_cast<Operator>(BO->LHS); 6394 if (L && L->getOpcode() == Instruction::Shl) { 6395 // X = Shl A, n 6396 // Y = AShr X, m 6397 // Both n and m are constant. 6398 6399 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0)); 6400 if (L->getOperand(1) == BO->RHS) 6401 // For a two-shift sext-inreg, i.e. n = m, 6402 // use sext(trunc(x)) as the SCEV expression. 6403 return getSignExtendExpr( 6404 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy); 6405 6406 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1)); 6407 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) { 6408 uint64_t ShlAmt = ShlAmtCI->getZExtValue(); 6409 if (ShlAmt > AShrAmt) { 6410 // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV 6411 // expression. We already checked that ShlAmt < BitWidth, so 6412 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as 6413 // ShlAmt - AShrAmt < Amt. 6414 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 6415 ShlAmt - AShrAmt); 6416 return getSignExtendExpr( 6417 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 6418 getConstant(Mul)), OuterTy); 6419 } 6420 } 6421 } 6422 break; 6423 } 6424 } 6425 } 6426 6427 switch (U->getOpcode()) { 6428 case Instruction::Trunc: 6429 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 6430 6431 case Instruction::ZExt: 6432 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6433 6434 case Instruction::SExt: 6435 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 6436 // The NSW flag of a subtract does not always survive the conversion to 6437 // A + (-1)*B. By pushing sign extension onto its operands we are much 6438 // more likely to preserve NSW and allow later AddRec optimisations. 6439 // 6440 // NOTE: This is effectively duplicating this logic from getSignExtend: 6441 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6442 // but by that point the NSW information has potentially been lost. 6443 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6444 Type *Ty = U->getType(); 6445 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6446 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6447 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6448 } 6449 } 6450 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6451 6452 case Instruction::BitCast: 6453 // BitCasts are no-op casts so we just eliminate the cast. 6454 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6455 return getSCEV(U->getOperand(0)); 6456 break; 6457 6458 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 6459 // lead to pointer expressions which cannot safely be expanded to GEPs, 6460 // because ScalarEvolution doesn't respect the GEP aliasing rules when 6461 // simplifying integer expressions. 6462 6463 case Instruction::GetElementPtr: 6464 return createNodeForGEP(cast<GEPOperator>(U)); 6465 6466 case Instruction::PHI: 6467 return createNodeForPHI(cast<PHINode>(U)); 6468 6469 case Instruction::Select: 6470 // U can also be a select constant expr, which let fall through. Since 6471 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 6472 // constant expressions cannot have instructions as operands, we'd have 6473 // returned getUnknown for a select constant expressions anyway. 6474 if (isa<Instruction>(U)) 6475 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6476 U->getOperand(1), U->getOperand(2)); 6477 break; 6478 6479 case Instruction::Call: 6480 case Instruction::Invoke: 6481 if (Value *RV = CallSite(U).getReturnedArgOperand()) 6482 return getSCEV(RV); 6483 break; 6484 } 6485 6486 return getUnknown(V); 6487 } 6488 6489 //===----------------------------------------------------------------------===// 6490 // Iteration Count Computation Code 6491 // 6492 6493 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6494 if (!ExitCount) 6495 return 0; 6496 6497 ConstantInt *ExitConst = ExitCount->getValue(); 6498 6499 // Guard against huge trip counts. 6500 if (ExitConst->getValue().getActiveBits() > 32) 6501 return 0; 6502 6503 // In case of integer overflow, this returns 0, which is correct. 6504 return ((unsigned)ExitConst->getZExtValue()) + 1; 6505 } 6506 6507 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6508 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6509 return getSmallConstantTripCount(L, ExitingBB); 6510 6511 // No trip count information for multiple exits. 6512 return 0; 6513 } 6514 6515 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L, 6516 BasicBlock *ExitingBlock) { 6517 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6518 assert(L->isLoopExiting(ExitingBlock) && 6519 "Exiting block must actually branch out of the loop!"); 6520 const SCEVConstant *ExitCount = 6521 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 6522 return getConstantTripCount(ExitCount); 6523 } 6524 6525 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 6526 const auto *MaxExitCount = 6527 dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L)); 6528 return getConstantTripCount(MaxExitCount); 6529 } 6530 6531 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 6532 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6533 return getSmallConstantTripMultiple(L, ExitingBB); 6534 6535 // No trip multiple information for multiple exits. 6536 return 0; 6537 } 6538 6539 /// Returns the largest constant divisor of the trip count of this loop as a 6540 /// normal unsigned value, if possible. This means that the actual trip count is 6541 /// always a multiple of the returned value (don't forget the trip count could 6542 /// very well be zero as well!). 6543 /// 6544 /// Returns 1 if the trip count is unknown or not guaranteed to be the 6545 /// multiple of a constant (which is also the case if the trip count is simply 6546 /// constant, use getSmallConstantTripCount for that case), Will also return 1 6547 /// if the trip count is very large (>= 2^32). 6548 /// 6549 /// As explained in the comments for getSmallConstantTripCount, this assumes 6550 /// that control exits the loop via ExitingBlock. 6551 unsigned 6552 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 6553 BasicBlock *ExitingBlock) { 6554 assert(ExitingBlock && "Must pass a non-null exiting block!"); 6555 assert(L->isLoopExiting(ExitingBlock) && 6556 "Exiting block must actually branch out of the loop!"); 6557 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 6558 if (ExitCount == getCouldNotCompute()) 6559 return 1; 6560 6561 // Get the trip count from the BE count by adding 1. 6562 const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType())); 6563 6564 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 6565 if (!TC) 6566 // Attempt to factor more general cases. Returns the greatest power of 6567 // two divisor. If overflow happens, the trip count expression is still 6568 // divisible by the greatest power of 2 divisor returned. 6569 return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr)); 6570 6571 ConstantInt *Result = TC->getValue(); 6572 6573 // Guard against huge trip counts (this requires checking 6574 // for zero to handle the case where the trip count == -1 and the 6575 // addition wraps). 6576 if (!Result || Result->getValue().getActiveBits() > 32 || 6577 Result->getValue().getActiveBits() == 0) 6578 return 1; 6579 6580 return (unsigned)Result->getZExtValue(); 6581 } 6582 6583 /// Get the expression for the number of loop iterations for which this loop is 6584 /// guaranteed not to exit via ExitingBlock. Otherwise return 6585 /// SCEVCouldNotCompute. 6586 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 6587 BasicBlock *ExitingBlock) { 6588 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 6589 } 6590 6591 const SCEV * 6592 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 6593 SCEVUnionPredicate &Preds) { 6594 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 6595 } 6596 6597 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 6598 return getBackedgeTakenInfo(L).getExact(L, this); 6599 } 6600 6601 /// Similar to getBackedgeTakenCount, except return the least SCEV value that is 6602 /// known never to be less than the actual backedge taken count. 6603 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 6604 return getBackedgeTakenInfo(L).getMax(this); 6605 } 6606 6607 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 6608 return getBackedgeTakenInfo(L).isMaxOrZero(this); 6609 } 6610 6611 /// Push PHI nodes in the header of the given loop onto the given Worklist. 6612 static void 6613 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 6614 BasicBlock *Header = L->getHeader(); 6615 6616 // Push all Loop-header PHIs onto the Worklist stack. 6617 for (PHINode &PN : Header->phis()) 6618 Worklist.push_back(&PN); 6619 } 6620 6621 const ScalarEvolution::BackedgeTakenInfo & 6622 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 6623 auto &BTI = getBackedgeTakenInfo(L); 6624 if (BTI.hasFullInfo()) 6625 return BTI; 6626 6627 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6628 6629 if (!Pair.second) 6630 return Pair.first->second; 6631 6632 BackedgeTakenInfo Result = 6633 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 6634 6635 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 6636 } 6637 6638 const ScalarEvolution::BackedgeTakenInfo & 6639 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 6640 // Initially insert an invalid entry for this loop. If the insertion 6641 // succeeds, proceed to actually compute a backedge-taken count and 6642 // update the value. The temporary CouldNotCompute value tells SCEV 6643 // code elsewhere that it shouldn't attempt to request a new 6644 // backedge-taken count, which could result in infinite recursion. 6645 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 6646 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 6647 if (!Pair.second) 6648 return Pair.first->second; 6649 6650 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 6651 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 6652 // must be cleared in this scope. 6653 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 6654 6655 // In product build, there are no usage of statistic. 6656 (void)NumTripCountsComputed; 6657 (void)NumTripCountsNotComputed; 6658 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 6659 const SCEV *BEExact = Result.getExact(L, this); 6660 if (BEExact != getCouldNotCompute()) { 6661 assert(isLoopInvariant(BEExact, L) && 6662 isLoopInvariant(Result.getMax(this), L) && 6663 "Computed backedge-taken count isn't loop invariant for loop!"); 6664 ++NumTripCountsComputed; 6665 } 6666 else if (Result.getMax(this) == getCouldNotCompute() && 6667 isa<PHINode>(L->getHeader()->begin())) { 6668 // Only count loops that have phi nodes as not being computable. 6669 ++NumTripCountsNotComputed; 6670 } 6671 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 6672 6673 // Now that we know more about the trip count for this loop, forget any 6674 // existing SCEV values for PHI nodes in this loop since they are only 6675 // conservative estimates made without the benefit of trip count 6676 // information. This is similar to the code in forgetLoop, except that 6677 // it handles SCEVUnknown PHI nodes specially. 6678 if (Result.hasAnyInfo()) { 6679 SmallVector<Instruction *, 16> Worklist; 6680 PushLoopPHIs(L, Worklist); 6681 6682 SmallPtrSet<Instruction *, 8> Discovered; 6683 while (!Worklist.empty()) { 6684 Instruction *I = Worklist.pop_back_val(); 6685 6686 ValueExprMapType::iterator It = 6687 ValueExprMap.find_as(static_cast<Value *>(I)); 6688 if (It != ValueExprMap.end()) { 6689 const SCEV *Old = It->second; 6690 6691 // SCEVUnknown for a PHI either means that it has an unrecognized 6692 // structure, or it's a PHI that's in the progress of being computed 6693 // by createNodeForPHI. In the former case, additional loop trip 6694 // count information isn't going to change anything. In the later 6695 // case, createNodeForPHI will perform the necessary updates on its 6696 // own when it gets to that point. 6697 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) { 6698 eraseValueFromMap(It->first); 6699 forgetMemoizedResults(Old); 6700 } 6701 if (PHINode *PN = dyn_cast<PHINode>(I)) 6702 ConstantEvolutionLoopExitValue.erase(PN); 6703 } 6704 6705 // Since we don't need to invalidate anything for correctness and we're 6706 // only invalidating to make SCEV's results more precise, we get to stop 6707 // early to avoid invalidating too much. This is especially important in 6708 // cases like: 6709 // 6710 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node 6711 // loop0: 6712 // %pn0 = phi 6713 // ... 6714 // loop1: 6715 // %pn1 = phi 6716 // ... 6717 // 6718 // where both loop0 and loop1's backedge taken count uses the SCEV 6719 // expression for %v. If we don't have the early stop below then in cases 6720 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip 6721 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip 6722 // count for loop1, effectively nullifying SCEV's trip count cache. 6723 for (auto *U : I->users()) 6724 if (auto *I = dyn_cast<Instruction>(U)) { 6725 auto *LoopForUser = LI.getLoopFor(I->getParent()); 6726 if (LoopForUser && L->contains(LoopForUser) && 6727 Discovered.insert(I).second) 6728 Worklist.push_back(I); 6729 } 6730 } 6731 } 6732 6733 // Re-lookup the insert position, since the call to 6734 // computeBackedgeTakenCount above could result in a 6735 // recusive call to getBackedgeTakenInfo (on a different 6736 // loop), which would invalidate the iterator computed 6737 // earlier. 6738 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6739 } 6740 6741 void ScalarEvolution::forgetLoop(const Loop *L) { 6742 // Drop any stored trip count value. 6743 auto RemoveLoopFromBackedgeMap = 6744 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6745 auto BTCPos = Map.find(L); 6746 if (BTCPos != Map.end()) { 6747 BTCPos->second.clear(); 6748 Map.erase(BTCPos); 6749 } 6750 }; 6751 6752 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6753 SmallVector<Instruction *, 32> Worklist; 6754 SmallPtrSet<Instruction *, 16> Visited; 6755 6756 // Iterate over all the loops and sub-loops to drop SCEV information. 6757 while (!LoopWorklist.empty()) { 6758 auto *CurrL = LoopWorklist.pop_back_val(); 6759 6760 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6761 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6762 6763 // Drop information about predicated SCEV rewrites for this loop. 6764 for (auto I = PredicatedSCEVRewrites.begin(); 6765 I != PredicatedSCEVRewrites.end();) { 6766 std::pair<const SCEV *, const Loop *> Entry = I->first; 6767 if (Entry.second == CurrL) 6768 PredicatedSCEVRewrites.erase(I++); 6769 else 6770 ++I; 6771 } 6772 6773 auto LoopUsersItr = LoopUsers.find(CurrL); 6774 if (LoopUsersItr != LoopUsers.end()) { 6775 for (auto *S : LoopUsersItr->second) 6776 forgetMemoizedResults(S); 6777 LoopUsers.erase(LoopUsersItr); 6778 } 6779 6780 // Drop information about expressions based on loop-header PHIs. 6781 PushLoopPHIs(CurrL, Worklist); 6782 6783 while (!Worklist.empty()) { 6784 Instruction *I = Worklist.pop_back_val(); 6785 if (!Visited.insert(I).second) 6786 continue; 6787 6788 ValueExprMapType::iterator It = 6789 ValueExprMap.find_as(static_cast<Value *>(I)); 6790 if (It != ValueExprMap.end()) { 6791 eraseValueFromMap(It->first); 6792 forgetMemoizedResults(It->second); 6793 if (PHINode *PN = dyn_cast<PHINode>(I)) 6794 ConstantEvolutionLoopExitValue.erase(PN); 6795 } 6796 6797 PushDefUseChildren(I, Worklist); 6798 } 6799 6800 LoopPropertiesCache.erase(CurrL); 6801 // Forget all contained loops too, to avoid dangling entries in the 6802 // ValuesAtScopes map. 6803 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6804 } 6805 } 6806 6807 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6808 while (Loop *Parent = L->getParentLoop()) 6809 L = Parent; 6810 forgetLoop(L); 6811 } 6812 6813 void ScalarEvolution::forgetValue(Value *V) { 6814 Instruction *I = dyn_cast<Instruction>(V); 6815 if (!I) return; 6816 6817 // Drop information about expressions based on loop-header PHIs. 6818 SmallVector<Instruction *, 16> Worklist; 6819 Worklist.push_back(I); 6820 6821 SmallPtrSet<Instruction *, 8> Visited; 6822 while (!Worklist.empty()) { 6823 I = Worklist.pop_back_val(); 6824 if (!Visited.insert(I).second) 6825 continue; 6826 6827 ValueExprMapType::iterator It = 6828 ValueExprMap.find_as(static_cast<Value *>(I)); 6829 if (It != ValueExprMap.end()) { 6830 eraseValueFromMap(It->first); 6831 forgetMemoizedResults(It->second); 6832 if (PHINode *PN = dyn_cast<PHINode>(I)) 6833 ConstantEvolutionLoopExitValue.erase(PN); 6834 } 6835 6836 PushDefUseChildren(I, Worklist); 6837 } 6838 } 6839 6840 /// Get the exact loop backedge taken count considering all loop exits. A 6841 /// computable result can only be returned for loops with all exiting blocks 6842 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6843 /// is never skipped. This is a valid assumption as long as the loop exits via 6844 /// that test. For precise results, it is the caller's responsibility to specify 6845 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6846 const SCEV * 6847 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6848 SCEVUnionPredicate *Preds) const { 6849 // If any exits were not computable, the loop is not computable. 6850 if (!isComplete() || ExitNotTaken.empty()) 6851 return SE->getCouldNotCompute(); 6852 6853 const BasicBlock *Latch = L->getLoopLatch(); 6854 // All exiting blocks we have collected must dominate the only backedge. 6855 if (!Latch) 6856 return SE->getCouldNotCompute(); 6857 6858 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6859 // count is simply a minimum out of all these calculated exit counts. 6860 SmallVector<const SCEV *, 2> Ops; 6861 for (auto &ENT : ExitNotTaken) { 6862 const SCEV *BECount = ENT.ExactNotTaken; 6863 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6864 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6865 "We should only have known counts for exiting blocks that dominate " 6866 "latch!"); 6867 6868 Ops.push_back(BECount); 6869 6870 if (Preds && !ENT.hasAlwaysTruePredicate()) 6871 Preds->add(ENT.Predicate.get()); 6872 6873 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6874 "Predicate should be always true!"); 6875 } 6876 6877 return SE->getUMinFromMismatchedTypes(Ops); 6878 } 6879 6880 /// Get the exact not taken count for this loop exit. 6881 const SCEV * 6882 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6883 ScalarEvolution *SE) const { 6884 for (auto &ENT : ExitNotTaken) 6885 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6886 return ENT.ExactNotTaken; 6887 6888 return SE->getCouldNotCompute(); 6889 } 6890 6891 /// getMax - Get the max backedge taken count for the loop. 6892 const SCEV * 6893 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6894 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6895 return !ENT.hasAlwaysTruePredicate(); 6896 }; 6897 6898 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6899 return SE->getCouldNotCompute(); 6900 6901 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6902 "No point in having a non-constant max backedge taken count!"); 6903 return getMax(); 6904 } 6905 6906 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6907 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6908 return !ENT.hasAlwaysTruePredicate(); 6909 }; 6910 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6911 } 6912 6913 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6914 ScalarEvolution *SE) const { 6915 if (getMax() && getMax() != SE->getCouldNotCompute() && 6916 SE->hasOperand(getMax(), S)) 6917 return true; 6918 6919 for (auto &ENT : ExitNotTaken) 6920 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6921 SE->hasOperand(ENT.ExactNotTaken, S)) 6922 return true; 6923 6924 return false; 6925 } 6926 6927 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6928 : ExactNotTaken(E), MaxNotTaken(E) { 6929 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6930 isa<SCEVConstant>(MaxNotTaken)) && 6931 "No point in having a non-constant max backedge taken count!"); 6932 } 6933 6934 ScalarEvolution::ExitLimit::ExitLimit( 6935 const SCEV *E, const SCEV *M, bool MaxOrZero, 6936 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6937 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6938 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6939 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6940 "Exact is not allowed to be less precise than Max"); 6941 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6942 isa<SCEVConstant>(MaxNotTaken)) && 6943 "No point in having a non-constant max backedge taken count!"); 6944 for (auto *PredSet : PredSetList) 6945 for (auto *P : *PredSet) 6946 addPredicate(P); 6947 } 6948 6949 ScalarEvolution::ExitLimit::ExitLimit( 6950 const SCEV *E, const SCEV *M, bool MaxOrZero, 6951 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6952 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6953 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6954 isa<SCEVConstant>(MaxNotTaken)) && 6955 "No point in having a non-constant max backedge taken count!"); 6956 } 6957 6958 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6959 bool MaxOrZero) 6960 : ExitLimit(E, M, MaxOrZero, None) { 6961 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6962 isa<SCEVConstant>(MaxNotTaken)) && 6963 "No point in having a non-constant max backedge taken count!"); 6964 } 6965 6966 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6967 /// computable exit into a persistent ExitNotTakenInfo array. 6968 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 6969 SmallVectorImpl<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 6970 &&ExitCounts, 6971 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 6972 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 6973 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 6974 6975 ExitNotTaken.reserve(ExitCounts.size()); 6976 std::transform( 6977 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 6978 [&](const EdgeExitInfo &EEI) { 6979 BasicBlock *ExitBB = EEI.first; 6980 const ExitLimit &EL = EEI.second; 6981 if (EL.Predicates.empty()) 6982 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 6983 6984 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 6985 for (auto *Pred : EL.Predicates) 6986 Predicate->add(Pred); 6987 6988 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 6989 }); 6990 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 6991 "No point in having a non-constant max backedge taken count!"); 6992 } 6993 6994 /// Invalidate this result and free the ExitNotTakenInfo array. 6995 void ScalarEvolution::BackedgeTakenInfo::clear() { 6996 ExitNotTaken.clear(); 6997 } 6998 6999 /// Compute the number of times the backedge of the specified loop will execute. 7000 ScalarEvolution::BackedgeTakenInfo 7001 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 7002 bool AllowPredicates) { 7003 SmallVector<BasicBlock *, 8> ExitingBlocks; 7004 L->getExitingBlocks(ExitingBlocks); 7005 7006 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7007 7008 SmallVector<EdgeExitInfo, 4> ExitCounts; 7009 bool CouldComputeBECount = true; 7010 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 7011 const SCEV *MustExitMaxBECount = nullptr; 7012 const SCEV *MayExitMaxBECount = nullptr; 7013 bool MustExitMaxOrZero = false; 7014 7015 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 7016 // and compute maxBECount. 7017 // Do a union of all the predicates here. 7018 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 7019 BasicBlock *ExitBB = ExitingBlocks[i]; 7020 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 7021 7022 assert((AllowPredicates || EL.Predicates.empty()) && 7023 "Predicated exit limit when predicates are not allowed!"); 7024 7025 // 1. For each exit that can be computed, add an entry to ExitCounts. 7026 // CouldComputeBECount is true only if all exits can be computed. 7027 if (EL.ExactNotTaken == getCouldNotCompute()) 7028 // We couldn't compute an exact value for this exit, so 7029 // we won't be able to compute an exact value for the loop. 7030 CouldComputeBECount = false; 7031 else 7032 ExitCounts.emplace_back(ExitBB, EL); 7033 7034 // 2. Derive the loop's MaxBECount from each exit's max number of 7035 // non-exiting iterations. Partition the loop exits into two kinds: 7036 // LoopMustExits and LoopMayExits. 7037 // 7038 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 7039 // is a LoopMayExit. If any computable LoopMustExit is found, then 7040 // MaxBECount is the minimum EL.MaxNotTaken of computable 7041 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7042 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7043 // computable EL.MaxNotTaken. 7044 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 7045 DT.dominates(ExitBB, Latch)) { 7046 if (!MustExitMaxBECount) { 7047 MustExitMaxBECount = EL.MaxNotTaken; 7048 MustExitMaxOrZero = EL.MaxOrZero; 7049 } else { 7050 MustExitMaxBECount = 7051 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 7052 } 7053 } else if (MayExitMaxBECount != getCouldNotCompute()) { 7054 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 7055 MayExitMaxBECount = EL.MaxNotTaken; 7056 else { 7057 MayExitMaxBECount = 7058 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 7059 } 7060 } 7061 } 7062 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 7063 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 7064 // The loop backedge will be taken the maximum or zero times if there's 7065 // a single exit that must be taken the maximum or zero times. 7066 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 7067 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 7068 MaxBECount, MaxOrZero); 7069 } 7070 7071 ScalarEvolution::ExitLimit 7072 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 7073 bool AllowPredicates) { 7074 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 7075 // If our exiting block does not dominate the latch, then its connection with 7076 // loop's exit limit may be far from trivial. 7077 const BasicBlock *Latch = L->getLoopLatch(); 7078 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 7079 return getCouldNotCompute(); 7080 7081 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 7082 Instruction *Term = ExitingBlock->getTerminator(); 7083 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 7084 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 7085 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 7086 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 7087 "It should have one successor in loop and one exit block!"); 7088 // Proceed to the next level to examine the exit condition expression. 7089 return computeExitLimitFromCond( 7090 L, BI->getCondition(), ExitIfTrue, 7091 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 7092 } 7093 7094 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 7095 // For switch, make sure that there is a single exit from the loop. 7096 BasicBlock *Exit = nullptr; 7097 for (auto *SBB : successors(ExitingBlock)) 7098 if (!L->contains(SBB)) { 7099 if (Exit) // Multiple exit successors. 7100 return getCouldNotCompute(); 7101 Exit = SBB; 7102 } 7103 assert(Exit && "Exiting block must have at least one exit"); 7104 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7105 /*ControlsExit=*/IsOnlyExit); 7106 } 7107 7108 return getCouldNotCompute(); 7109 } 7110 7111 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7112 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7113 bool ControlsExit, bool AllowPredicates) { 7114 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7115 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7116 ControlsExit, AllowPredicates); 7117 } 7118 7119 Optional<ScalarEvolution::ExitLimit> 7120 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7121 bool ExitIfTrue, bool ControlsExit, 7122 bool AllowPredicates) { 7123 (void)this->L; 7124 (void)this->ExitIfTrue; 7125 (void)this->AllowPredicates; 7126 7127 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7128 this->AllowPredicates == AllowPredicates && 7129 "Variance in assumed invariant key components!"); 7130 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7131 if (Itr == TripCountMap.end()) 7132 return None; 7133 return Itr->second; 7134 } 7135 7136 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7137 bool ExitIfTrue, 7138 bool ControlsExit, 7139 bool AllowPredicates, 7140 const ExitLimit &EL) { 7141 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7142 this->AllowPredicates == AllowPredicates && 7143 "Variance in assumed invariant key components!"); 7144 7145 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7146 assert(InsertResult.second && "Expected successful insertion!"); 7147 (void)InsertResult; 7148 (void)ExitIfTrue; 7149 } 7150 7151 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7152 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7153 bool ControlsExit, bool AllowPredicates) { 7154 7155 if (auto MaybeEL = 7156 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7157 return *MaybeEL; 7158 7159 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7160 ControlsExit, AllowPredicates); 7161 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7162 return EL; 7163 } 7164 7165 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7166 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7167 bool ControlsExit, bool AllowPredicates) { 7168 // Check if the controlling expression for this loop is an And or Or. 7169 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7170 if (BO->getOpcode() == Instruction::And) { 7171 // Recurse on the operands of the and. 7172 bool EitherMayExit = !ExitIfTrue; 7173 ExitLimit EL0 = computeExitLimitFromCondCached( 7174 Cache, L, BO->getOperand(0), ExitIfTrue, 7175 ControlsExit && !EitherMayExit, AllowPredicates); 7176 ExitLimit EL1 = computeExitLimitFromCondCached( 7177 Cache, L, BO->getOperand(1), ExitIfTrue, 7178 ControlsExit && !EitherMayExit, AllowPredicates); 7179 const SCEV *BECount = getCouldNotCompute(); 7180 const SCEV *MaxBECount = getCouldNotCompute(); 7181 if (EitherMayExit) { 7182 // Both conditions must be true for the loop to continue executing. 7183 // Choose the less conservative count. 7184 if (EL0.ExactNotTaken == getCouldNotCompute() || 7185 EL1.ExactNotTaken == getCouldNotCompute()) 7186 BECount = getCouldNotCompute(); 7187 else 7188 BECount = 7189 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7190 if (EL0.MaxNotTaken == getCouldNotCompute()) 7191 MaxBECount = EL1.MaxNotTaken; 7192 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7193 MaxBECount = EL0.MaxNotTaken; 7194 else 7195 MaxBECount = 7196 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7197 } else { 7198 // Both conditions must be true at the same time for the loop to exit. 7199 // For now, be conservative. 7200 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7201 MaxBECount = EL0.MaxNotTaken; 7202 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7203 BECount = EL0.ExactNotTaken; 7204 } 7205 7206 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7207 // to be more aggressive when computing BECount than when computing 7208 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7209 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7210 // to not. 7211 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7212 !isa<SCEVCouldNotCompute>(BECount)) 7213 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7214 7215 return ExitLimit(BECount, MaxBECount, false, 7216 {&EL0.Predicates, &EL1.Predicates}); 7217 } 7218 if (BO->getOpcode() == Instruction::Or) { 7219 // Recurse on the operands of the or. 7220 bool EitherMayExit = ExitIfTrue; 7221 ExitLimit EL0 = computeExitLimitFromCondCached( 7222 Cache, L, BO->getOperand(0), ExitIfTrue, 7223 ControlsExit && !EitherMayExit, AllowPredicates); 7224 ExitLimit EL1 = computeExitLimitFromCondCached( 7225 Cache, L, BO->getOperand(1), ExitIfTrue, 7226 ControlsExit && !EitherMayExit, AllowPredicates); 7227 const SCEV *BECount = getCouldNotCompute(); 7228 const SCEV *MaxBECount = getCouldNotCompute(); 7229 if (EitherMayExit) { 7230 // Both conditions must be false for the loop to continue executing. 7231 // Choose the less conservative count. 7232 if (EL0.ExactNotTaken == getCouldNotCompute() || 7233 EL1.ExactNotTaken == getCouldNotCompute()) 7234 BECount = getCouldNotCompute(); 7235 else 7236 BECount = 7237 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7238 if (EL0.MaxNotTaken == getCouldNotCompute()) 7239 MaxBECount = EL1.MaxNotTaken; 7240 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7241 MaxBECount = EL0.MaxNotTaken; 7242 else 7243 MaxBECount = 7244 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7245 } else { 7246 // Both conditions must be false at the same time for the loop to exit. 7247 // For now, be conservative. 7248 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7249 MaxBECount = EL0.MaxNotTaken; 7250 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7251 BECount = EL0.ExactNotTaken; 7252 } 7253 7254 return ExitLimit(BECount, MaxBECount, false, 7255 {&EL0.Predicates, &EL1.Predicates}); 7256 } 7257 } 7258 7259 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7260 // Proceed to the next level to examine the icmp. 7261 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7262 ExitLimit EL = 7263 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7264 if (EL.hasFullInfo() || !AllowPredicates) 7265 return EL; 7266 7267 // Try again, but use SCEV predicates this time. 7268 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7269 /*AllowPredicates=*/true); 7270 } 7271 7272 // Check for a constant condition. These are normally stripped out by 7273 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7274 // preserve the CFG and is temporarily leaving constant conditions 7275 // in place. 7276 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7277 if (ExitIfTrue == !CI->getZExtValue()) 7278 // The backedge is always taken. 7279 return getCouldNotCompute(); 7280 else 7281 // The backedge is never taken. 7282 return getZero(CI->getType()); 7283 } 7284 7285 // If it's not an integer or pointer comparison then compute it the hard way. 7286 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7287 } 7288 7289 ScalarEvolution::ExitLimit 7290 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7291 ICmpInst *ExitCond, 7292 bool ExitIfTrue, 7293 bool ControlsExit, 7294 bool AllowPredicates) { 7295 // If the condition was exit on true, convert the condition to exit on false 7296 ICmpInst::Predicate Pred; 7297 if (!ExitIfTrue) 7298 Pred = ExitCond->getPredicate(); 7299 else 7300 Pred = ExitCond->getInversePredicate(); 7301 const ICmpInst::Predicate OriginalPred = Pred; 7302 7303 // Handle common loops like: for (X = "string"; *X; ++X) 7304 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7305 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7306 ExitLimit ItCnt = 7307 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7308 if (ItCnt.hasAnyInfo()) 7309 return ItCnt; 7310 } 7311 7312 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7313 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7314 7315 // Try to evaluate any dependencies out of the loop. 7316 LHS = getSCEVAtScope(LHS, L); 7317 RHS = getSCEVAtScope(RHS, L); 7318 7319 // At this point, we would like to compute how many iterations of the 7320 // loop the predicate will return true for these inputs. 7321 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7322 // If there is a loop-invariant, force it into the RHS. 7323 std::swap(LHS, RHS); 7324 Pred = ICmpInst::getSwappedPredicate(Pred); 7325 } 7326 7327 // Simplify the operands before analyzing them. 7328 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7329 7330 // If we have a comparison of a chrec against a constant, try to use value 7331 // ranges to answer this query. 7332 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7333 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7334 if (AddRec->getLoop() == L) { 7335 // Form the constant range. 7336 ConstantRange CompRange = 7337 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7338 7339 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7340 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7341 } 7342 7343 switch (Pred) { 7344 case ICmpInst::ICMP_NE: { // while (X != Y) 7345 // Convert to: while (X-Y != 0) 7346 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7347 AllowPredicates); 7348 if (EL.hasAnyInfo()) return EL; 7349 break; 7350 } 7351 case ICmpInst::ICMP_EQ: { // while (X == Y) 7352 // Convert to: while (X-Y == 0) 7353 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7354 if (EL.hasAnyInfo()) return EL; 7355 break; 7356 } 7357 case ICmpInst::ICMP_SLT: 7358 case ICmpInst::ICMP_ULT: { // while (X < Y) 7359 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7360 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7361 AllowPredicates); 7362 if (EL.hasAnyInfo()) return EL; 7363 break; 7364 } 7365 case ICmpInst::ICMP_SGT: 7366 case ICmpInst::ICMP_UGT: { // while (X > Y) 7367 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7368 ExitLimit EL = 7369 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7370 AllowPredicates); 7371 if (EL.hasAnyInfo()) return EL; 7372 break; 7373 } 7374 default: 7375 break; 7376 } 7377 7378 auto *ExhaustiveCount = 7379 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7380 7381 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7382 return ExhaustiveCount; 7383 7384 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7385 ExitCond->getOperand(1), L, OriginalPred); 7386 } 7387 7388 ScalarEvolution::ExitLimit 7389 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7390 SwitchInst *Switch, 7391 BasicBlock *ExitingBlock, 7392 bool ControlsExit) { 7393 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7394 7395 // Give up if the exit is the default dest of a switch. 7396 if (Switch->getDefaultDest() == ExitingBlock) 7397 return getCouldNotCompute(); 7398 7399 assert(L->contains(Switch->getDefaultDest()) && 7400 "Default case must not exit the loop!"); 7401 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7402 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7403 7404 // while (X != Y) --> while (X-Y != 0) 7405 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7406 if (EL.hasAnyInfo()) 7407 return EL; 7408 7409 return getCouldNotCompute(); 7410 } 7411 7412 static ConstantInt * 7413 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7414 ScalarEvolution &SE) { 7415 const SCEV *InVal = SE.getConstant(C); 7416 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7417 assert(isa<SCEVConstant>(Val) && 7418 "Evaluation of SCEV at constant didn't fold correctly?"); 7419 return cast<SCEVConstant>(Val)->getValue(); 7420 } 7421 7422 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7423 /// compute the backedge execution count. 7424 ScalarEvolution::ExitLimit 7425 ScalarEvolution::computeLoadConstantCompareExitLimit( 7426 LoadInst *LI, 7427 Constant *RHS, 7428 const Loop *L, 7429 ICmpInst::Predicate predicate) { 7430 if (LI->isVolatile()) return getCouldNotCompute(); 7431 7432 // Check to see if the loaded pointer is a getelementptr of a global. 7433 // TODO: Use SCEV instead of manually grubbing with GEPs. 7434 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 7435 if (!GEP) return getCouldNotCompute(); 7436 7437 // Make sure that it is really a constant global we are gepping, with an 7438 // initializer, and make sure the first IDX is really 0. 7439 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 7440 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 7441 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 7442 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 7443 return getCouldNotCompute(); 7444 7445 // Okay, we allow one non-constant index into the GEP instruction. 7446 Value *VarIdx = nullptr; 7447 std::vector<Constant*> Indexes; 7448 unsigned VarIdxNum = 0; 7449 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 7450 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 7451 Indexes.push_back(CI); 7452 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 7453 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 7454 VarIdx = GEP->getOperand(i); 7455 VarIdxNum = i-2; 7456 Indexes.push_back(nullptr); 7457 } 7458 7459 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 7460 if (!VarIdx) 7461 return getCouldNotCompute(); 7462 7463 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 7464 // Check to see if X is a loop variant variable value now. 7465 const SCEV *Idx = getSCEV(VarIdx); 7466 Idx = getSCEVAtScope(Idx, L); 7467 7468 // We can only recognize very limited forms of loop index expressions, in 7469 // particular, only affine AddRec's like {C1,+,C2}. 7470 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 7471 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 7472 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 7473 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 7474 return getCouldNotCompute(); 7475 7476 unsigned MaxSteps = MaxBruteForceIterations; 7477 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 7478 ConstantInt *ItCst = ConstantInt::get( 7479 cast<IntegerType>(IdxExpr->getType()), IterationNum); 7480 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 7481 7482 // Form the GEP offset. 7483 Indexes[VarIdxNum] = Val; 7484 7485 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 7486 Indexes); 7487 if (!Result) break; // Cannot compute! 7488 7489 // Evaluate the condition for this iteration. 7490 Result = ConstantExpr::getICmp(predicate, Result, RHS); 7491 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 7492 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 7493 ++NumArrayLenItCounts; 7494 return getConstant(ItCst); // Found terminating iteration! 7495 } 7496 } 7497 return getCouldNotCompute(); 7498 } 7499 7500 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 7501 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 7502 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 7503 if (!RHS) 7504 return getCouldNotCompute(); 7505 7506 const BasicBlock *Latch = L->getLoopLatch(); 7507 if (!Latch) 7508 return getCouldNotCompute(); 7509 7510 const BasicBlock *Predecessor = L->getLoopPredecessor(); 7511 if (!Predecessor) 7512 return getCouldNotCompute(); 7513 7514 // Return true if V is of the form "LHS `shift_op` <positive constant>". 7515 // Return LHS in OutLHS and shift_opt in OutOpCode. 7516 auto MatchPositiveShift = 7517 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 7518 7519 using namespace PatternMatch; 7520 7521 ConstantInt *ShiftAmt; 7522 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7523 OutOpCode = Instruction::LShr; 7524 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7525 OutOpCode = Instruction::AShr; 7526 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 7527 OutOpCode = Instruction::Shl; 7528 else 7529 return false; 7530 7531 return ShiftAmt->getValue().isStrictlyPositive(); 7532 }; 7533 7534 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 7535 // 7536 // loop: 7537 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 7538 // %iv.shifted = lshr i32 %iv, <positive constant> 7539 // 7540 // Return true on a successful match. Return the corresponding PHI node (%iv 7541 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 7542 auto MatchShiftRecurrence = 7543 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 7544 Optional<Instruction::BinaryOps> PostShiftOpCode; 7545 7546 { 7547 Instruction::BinaryOps OpC; 7548 Value *V; 7549 7550 // If we encounter a shift instruction, "peel off" the shift operation, 7551 // and remember that we did so. Later when we inspect %iv's backedge 7552 // value, we will make sure that the backedge value uses the same 7553 // operation. 7554 // 7555 // Note: the peeled shift operation does not have to be the same 7556 // instruction as the one feeding into the PHI's backedge value. We only 7557 // really care about it being the same *kind* of shift instruction -- 7558 // that's all that is required for our later inferences to hold. 7559 if (MatchPositiveShift(LHS, V, OpC)) { 7560 PostShiftOpCode = OpC; 7561 LHS = V; 7562 } 7563 } 7564 7565 PNOut = dyn_cast<PHINode>(LHS); 7566 if (!PNOut || PNOut->getParent() != L->getHeader()) 7567 return false; 7568 7569 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 7570 Value *OpLHS; 7571 7572 return 7573 // The backedge value for the PHI node must be a shift by a positive 7574 // amount 7575 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 7576 7577 // of the PHI node itself 7578 OpLHS == PNOut && 7579 7580 // and the kind of shift should be match the kind of shift we peeled 7581 // off, if any. 7582 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 7583 }; 7584 7585 PHINode *PN; 7586 Instruction::BinaryOps OpCode; 7587 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 7588 return getCouldNotCompute(); 7589 7590 const DataLayout &DL = getDataLayout(); 7591 7592 // The key rationale for this optimization is that for some kinds of shift 7593 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 7594 // within a finite number of iterations. If the condition guarding the 7595 // backedge (in the sense that the backedge is taken if the condition is true) 7596 // is false for the value the shift recurrence stabilizes to, then we know 7597 // that the backedge is taken only a finite number of times. 7598 7599 ConstantInt *StableValue = nullptr; 7600 switch (OpCode) { 7601 default: 7602 llvm_unreachable("Impossible case!"); 7603 7604 case Instruction::AShr: { 7605 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most 7606 // bitwidth(K) iterations. 7607 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7608 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7609 Predecessor->getTerminator(), &DT); 7610 auto *Ty = cast<IntegerType>(RHS->getType()); 7611 if (Known.isNonNegative()) 7612 StableValue = ConstantInt::get(Ty, 0); 7613 else if (Known.isNegative()) 7614 StableValue = ConstantInt::get(Ty, -1, true); 7615 else 7616 return getCouldNotCompute(); 7617 7618 break; 7619 } 7620 case Instruction::LShr: 7621 case Instruction::Shl: 7622 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7623 // stabilize to 0 in at most bitwidth(K) iterations. 7624 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7625 break; 7626 } 7627 7628 auto *Result = 7629 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7630 assert(Result->getType()->isIntegerTy(1) && 7631 "Otherwise cannot be an operand to a branch instruction"); 7632 7633 if (Result->isZeroValue()) { 7634 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7635 const SCEV *UpperBound = 7636 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7637 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7638 } 7639 7640 return getCouldNotCompute(); 7641 } 7642 7643 /// Return true if we can constant fold an instruction of the specified type, 7644 /// assuming that all operands were constants. 7645 static bool CanConstantFold(const Instruction *I) { 7646 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7647 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7648 isa<LoadInst>(I)) 7649 return true; 7650 7651 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7652 if (const Function *F = CI->getCalledFunction()) 7653 return canConstantFoldCallTo(CI, F); 7654 return false; 7655 } 7656 7657 /// Determine whether this instruction can constant evolve within this loop 7658 /// assuming its operands can all constant evolve. 7659 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7660 // An instruction outside of the loop can't be derived from a loop PHI. 7661 if (!L->contains(I)) return false; 7662 7663 if (isa<PHINode>(I)) { 7664 // We don't currently keep track of the control flow needed to evaluate 7665 // PHIs, so we cannot handle PHIs inside of loops. 7666 return L->getHeader() == I->getParent(); 7667 } 7668 7669 // If we won't be able to constant fold this expression even if the operands 7670 // are constants, bail early. 7671 return CanConstantFold(I); 7672 } 7673 7674 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7675 /// recursing through each instruction operand until reaching a loop header phi. 7676 static PHINode * 7677 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7678 DenseMap<Instruction *, PHINode *> &PHIMap, 7679 unsigned Depth) { 7680 if (Depth > MaxConstantEvolvingDepth) 7681 return nullptr; 7682 7683 // Otherwise, we can evaluate this instruction if all of its operands are 7684 // constant or derived from a PHI node themselves. 7685 PHINode *PHI = nullptr; 7686 for (Value *Op : UseInst->operands()) { 7687 if (isa<Constant>(Op)) continue; 7688 7689 Instruction *OpInst = dyn_cast<Instruction>(Op); 7690 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7691 7692 PHINode *P = dyn_cast<PHINode>(OpInst); 7693 if (!P) 7694 // If this operand is already visited, reuse the prior result. 7695 // We may have P != PHI if this is the deepest point at which the 7696 // inconsistent paths meet. 7697 P = PHIMap.lookup(OpInst); 7698 if (!P) { 7699 // Recurse and memoize the results, whether a phi is found or not. 7700 // This recursive call invalidates pointers into PHIMap. 7701 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 7702 PHIMap[OpInst] = P; 7703 } 7704 if (!P) 7705 return nullptr; // Not evolving from PHI 7706 if (PHI && PHI != P) 7707 return nullptr; // Evolving from multiple different PHIs. 7708 PHI = P; 7709 } 7710 // This is a expression evolving from a constant PHI! 7711 return PHI; 7712 } 7713 7714 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 7715 /// in the loop that V is derived from. We allow arbitrary operations along the 7716 /// way, but the operands of an operation must either be constants or a value 7717 /// derived from a constant PHI. If this expression does not fit with these 7718 /// constraints, return null. 7719 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 7720 Instruction *I = dyn_cast<Instruction>(V); 7721 if (!I || !canConstantEvolve(I, L)) return nullptr; 7722 7723 if (PHINode *PN = dyn_cast<PHINode>(I)) 7724 return PN; 7725 7726 // Record non-constant instructions contained by the loop. 7727 DenseMap<Instruction *, PHINode *> PHIMap; 7728 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 7729 } 7730 7731 /// EvaluateExpression - Given an expression that passes the 7732 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 7733 /// in the loop has the value PHIVal. If we can't fold this expression for some 7734 /// reason, return null. 7735 static Constant *EvaluateExpression(Value *V, const Loop *L, 7736 DenseMap<Instruction *, Constant *> &Vals, 7737 const DataLayout &DL, 7738 const TargetLibraryInfo *TLI) { 7739 // Convenient constant check, but redundant for recursive calls. 7740 if (Constant *C = dyn_cast<Constant>(V)) return C; 7741 Instruction *I = dyn_cast<Instruction>(V); 7742 if (!I) return nullptr; 7743 7744 if (Constant *C = Vals.lookup(I)) return C; 7745 7746 // An instruction inside the loop depends on a value outside the loop that we 7747 // weren't given a mapping for, or a value such as a call inside the loop. 7748 if (!canConstantEvolve(I, L)) return nullptr; 7749 7750 // An unmapped PHI can be due to a branch or another loop inside this loop, 7751 // or due to this not being the initial iteration through a loop where we 7752 // couldn't compute the evolution of this particular PHI last time. 7753 if (isa<PHINode>(I)) return nullptr; 7754 7755 std::vector<Constant*> Operands(I->getNumOperands()); 7756 7757 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7758 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7759 if (!Operand) { 7760 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7761 if (!Operands[i]) return nullptr; 7762 continue; 7763 } 7764 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7765 Vals[Operand] = C; 7766 if (!C) return nullptr; 7767 Operands[i] = C; 7768 } 7769 7770 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7771 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7772 Operands[1], DL, TLI); 7773 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7774 if (!LI->isVolatile()) 7775 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7776 } 7777 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7778 } 7779 7780 7781 // If every incoming value to PN except the one for BB is a specific Constant, 7782 // return that, else return nullptr. 7783 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7784 Constant *IncomingVal = nullptr; 7785 7786 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7787 if (PN->getIncomingBlock(i) == BB) 7788 continue; 7789 7790 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7791 if (!CurrentVal) 7792 return nullptr; 7793 7794 if (IncomingVal != CurrentVal) { 7795 if (IncomingVal) 7796 return nullptr; 7797 IncomingVal = CurrentVal; 7798 } 7799 } 7800 7801 return IncomingVal; 7802 } 7803 7804 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7805 /// in the header of its containing loop, we know the loop executes a 7806 /// constant number of times, and the PHI node is just a recurrence 7807 /// involving constants, fold it. 7808 Constant * 7809 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7810 const APInt &BEs, 7811 const Loop *L) { 7812 auto I = ConstantEvolutionLoopExitValue.find(PN); 7813 if (I != ConstantEvolutionLoopExitValue.end()) 7814 return I->second; 7815 7816 if (BEs.ugt(MaxBruteForceIterations)) 7817 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7818 7819 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7820 7821 DenseMap<Instruction *, Constant *> CurrentIterVals; 7822 BasicBlock *Header = L->getHeader(); 7823 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7824 7825 BasicBlock *Latch = L->getLoopLatch(); 7826 if (!Latch) 7827 return nullptr; 7828 7829 for (PHINode &PHI : Header->phis()) { 7830 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7831 CurrentIterVals[&PHI] = StartCST; 7832 } 7833 if (!CurrentIterVals.count(PN)) 7834 return RetVal = nullptr; 7835 7836 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7837 7838 // Execute the loop symbolically to determine the exit value. 7839 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7840 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7841 7842 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7843 unsigned IterationNum = 0; 7844 const DataLayout &DL = getDataLayout(); 7845 for (; ; ++IterationNum) { 7846 if (IterationNum == NumIterations) 7847 return RetVal = CurrentIterVals[PN]; // Got exit value! 7848 7849 // Compute the value of the PHIs for the next iteration. 7850 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7851 DenseMap<Instruction *, Constant *> NextIterVals; 7852 Constant *NextPHI = 7853 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7854 if (!NextPHI) 7855 return nullptr; // Couldn't evaluate! 7856 NextIterVals[PN] = NextPHI; 7857 7858 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7859 7860 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7861 // cease to be able to evaluate one of them or if they stop evolving, 7862 // because that doesn't necessarily prevent us from computing PN. 7863 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7864 for (const auto &I : CurrentIterVals) { 7865 PHINode *PHI = dyn_cast<PHINode>(I.first); 7866 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7867 PHIsToCompute.emplace_back(PHI, I.second); 7868 } 7869 // We use two distinct loops because EvaluateExpression may invalidate any 7870 // iterators into CurrentIterVals. 7871 for (const auto &I : PHIsToCompute) { 7872 PHINode *PHI = I.first; 7873 Constant *&NextPHI = NextIterVals[PHI]; 7874 if (!NextPHI) { // Not already computed. 7875 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7876 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7877 } 7878 if (NextPHI != I.second) 7879 StoppedEvolving = false; 7880 } 7881 7882 // If all entries in CurrentIterVals == NextIterVals then we can stop 7883 // iterating, the loop can't continue to change. 7884 if (StoppedEvolving) 7885 return RetVal = CurrentIterVals[PN]; 7886 7887 CurrentIterVals.swap(NextIterVals); 7888 } 7889 } 7890 7891 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 7892 Value *Cond, 7893 bool ExitWhen) { 7894 PHINode *PN = getConstantEvolvingPHI(Cond, L); 7895 if (!PN) return getCouldNotCompute(); 7896 7897 // If the loop is canonicalized, the PHI will have exactly two entries. 7898 // That's the only form we support here. 7899 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 7900 7901 DenseMap<Instruction *, Constant *> CurrentIterVals; 7902 BasicBlock *Header = L->getHeader(); 7903 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7904 7905 BasicBlock *Latch = L->getLoopLatch(); 7906 assert(Latch && "Should follow from NumIncomingValues == 2!"); 7907 7908 for (PHINode &PHI : Header->phis()) { 7909 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7910 CurrentIterVals[&PHI] = StartCST; 7911 } 7912 if (!CurrentIterVals.count(PN)) 7913 return getCouldNotCompute(); 7914 7915 // Okay, we find a PHI node that defines the trip count of this loop. Execute 7916 // the loop symbolically to determine when the condition gets a value of 7917 // "ExitWhen". 7918 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 7919 const DataLayout &DL = getDataLayout(); 7920 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 7921 auto *CondVal = dyn_cast_or_null<ConstantInt>( 7922 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 7923 7924 // Couldn't symbolically evaluate. 7925 if (!CondVal) return getCouldNotCompute(); 7926 7927 if (CondVal->getValue() == uint64_t(ExitWhen)) { 7928 ++NumBruteForceTripCountsComputed; 7929 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 7930 } 7931 7932 // Update all the PHI nodes for the next iteration. 7933 DenseMap<Instruction *, Constant *> NextIterVals; 7934 7935 // Create a list of which PHIs we need to compute. We want to do this before 7936 // calling EvaluateExpression on them because that may invalidate iterators 7937 // into CurrentIterVals. 7938 SmallVector<PHINode *, 8> PHIsToCompute; 7939 for (const auto &I : CurrentIterVals) { 7940 PHINode *PHI = dyn_cast<PHINode>(I.first); 7941 if (!PHI || PHI->getParent() != Header) continue; 7942 PHIsToCompute.push_back(PHI); 7943 } 7944 for (PHINode *PHI : PHIsToCompute) { 7945 Constant *&NextPHI = NextIterVals[PHI]; 7946 if (NextPHI) continue; // Already computed! 7947 7948 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7949 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7950 } 7951 CurrentIterVals.swap(NextIterVals); 7952 } 7953 7954 // Too many iterations were needed to evaluate. 7955 return getCouldNotCompute(); 7956 } 7957 7958 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 7959 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 7960 ValuesAtScopes[V]; 7961 // Check to see if we've folded this expression at this loop before. 7962 for (auto &LS : Values) 7963 if (LS.first == L) 7964 return LS.second ? LS.second : V; 7965 7966 Values.emplace_back(L, nullptr); 7967 7968 // Otherwise compute it. 7969 const SCEV *C = computeSCEVAtScope(V, L); 7970 for (auto &LS : reverse(ValuesAtScopes[V])) 7971 if (LS.first == L) { 7972 LS.second = C; 7973 break; 7974 } 7975 return C; 7976 } 7977 7978 /// This builds up a Constant using the ConstantExpr interface. That way, we 7979 /// will return Constants for objects which aren't represented by a 7980 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 7981 /// Returns NULL if the SCEV isn't representable as a Constant. 7982 static Constant *BuildConstantFromSCEV(const SCEV *V) { 7983 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 7984 case scCouldNotCompute: 7985 case scAddRecExpr: 7986 break; 7987 case scConstant: 7988 return cast<SCEVConstant>(V)->getValue(); 7989 case scUnknown: 7990 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 7991 case scSignExtend: { 7992 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 7993 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 7994 return ConstantExpr::getSExt(CastOp, SS->getType()); 7995 break; 7996 } 7997 case scZeroExtend: { 7998 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 7999 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8000 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8001 break; 8002 } 8003 case scTruncate: { 8004 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8005 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8006 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8007 break; 8008 } 8009 case scAddExpr: { 8010 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8011 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8012 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8013 unsigned AS = PTy->getAddressSpace(); 8014 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8015 C = ConstantExpr::getBitCast(C, DestPtrTy); 8016 } 8017 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8018 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8019 if (!C2) return nullptr; 8020 8021 // First pointer! 8022 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8023 unsigned AS = C2->getType()->getPointerAddressSpace(); 8024 std::swap(C, C2); 8025 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8026 // The offsets have been converted to bytes. We can add bytes to an 8027 // i8* by GEP with the byte count in the first index. 8028 C = ConstantExpr::getBitCast(C, DestPtrTy); 8029 } 8030 8031 // Don't bother trying to sum two pointers. We probably can't 8032 // statically compute a load that results from it anyway. 8033 if (C2->getType()->isPointerTy()) 8034 return nullptr; 8035 8036 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8037 if (PTy->getElementType()->isStructTy()) 8038 C2 = ConstantExpr::getIntegerCast( 8039 C2, Type::getInt32Ty(C->getContext()), true); 8040 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 8041 } else 8042 C = ConstantExpr::getAdd(C, C2); 8043 } 8044 return C; 8045 } 8046 break; 8047 } 8048 case scMulExpr: { 8049 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 8050 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 8051 // Don't bother with pointers at all. 8052 if (C->getType()->isPointerTy()) return nullptr; 8053 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 8054 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 8055 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 8056 C = ConstantExpr::getMul(C, C2); 8057 } 8058 return C; 8059 } 8060 break; 8061 } 8062 case scUDivExpr: { 8063 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 8064 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 8065 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 8066 if (LHS->getType() == RHS->getType()) 8067 return ConstantExpr::getUDiv(LHS, RHS); 8068 break; 8069 } 8070 case scSMaxExpr: 8071 case scUMaxExpr: 8072 break; // TODO: smax, umax. 8073 } 8074 return nullptr; 8075 } 8076 8077 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 8078 if (isa<SCEVConstant>(V)) return V; 8079 8080 // If this instruction is evolved from a constant-evolving PHI, compute the 8081 // exit value from the loop without using SCEVs. 8082 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 8083 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 8084 const Loop *LI = this->LI[I->getParent()]; 8085 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 8086 if (PHINode *PN = dyn_cast<PHINode>(I)) 8087 if (PN->getParent() == LI->getHeader()) { 8088 // Okay, there is no closed form solution for the PHI node. Check 8089 // to see if the loop that contains it has a known backedge-taken 8090 // count. If so, we may be able to force computation of the exit 8091 // value. 8092 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 8093 if (const SCEVConstant *BTCC = 8094 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 8095 8096 // This trivial case can show up in some degenerate cases where 8097 // the incoming IR has not yet been fully simplified. 8098 if (BTCC->getValue()->isZero()) { 8099 Value *InitValue = nullptr; 8100 bool MultipleInitValues = false; 8101 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 8102 if (!LI->contains(PN->getIncomingBlock(i))) { 8103 if (!InitValue) 8104 InitValue = PN->getIncomingValue(i); 8105 else if (InitValue != PN->getIncomingValue(i)) { 8106 MultipleInitValues = true; 8107 break; 8108 } 8109 } 8110 if (!MultipleInitValues && InitValue) 8111 return getSCEV(InitValue); 8112 } 8113 } 8114 // Okay, we know how many times the containing loop executes. If 8115 // this is a constant evolving PHI node, get the final value at 8116 // the specified iteration number. 8117 Constant *RV = 8118 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 8119 if (RV) return getSCEV(RV); 8120 } 8121 } 8122 8123 // Okay, this is an expression that we cannot symbolically evaluate 8124 // into a SCEV. Check to see if it's possible to symbolically evaluate 8125 // the arguments into constants, and if so, try to constant propagate the 8126 // result. This is particularly useful for computing loop exit values. 8127 if (CanConstantFold(I)) { 8128 SmallVector<Constant *, 4> Operands; 8129 bool MadeImprovement = false; 8130 for (Value *Op : I->operands()) { 8131 if (Constant *C = dyn_cast<Constant>(Op)) { 8132 Operands.push_back(C); 8133 continue; 8134 } 8135 8136 // If any of the operands is non-constant and if they are 8137 // non-integer and non-pointer, don't even try to analyze them 8138 // with scev techniques. 8139 if (!isSCEVable(Op->getType())) 8140 return V; 8141 8142 const SCEV *OrigV = getSCEV(Op); 8143 const SCEV *OpV = getSCEVAtScope(OrigV, L); 8144 MadeImprovement |= OrigV != OpV; 8145 8146 Constant *C = BuildConstantFromSCEV(OpV); 8147 if (!C) return V; 8148 if (C->getType() != Op->getType()) 8149 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 8150 Op->getType(), 8151 false), 8152 C, Op->getType()); 8153 Operands.push_back(C); 8154 } 8155 8156 // Check to see if getSCEVAtScope actually made an improvement. 8157 if (MadeImprovement) { 8158 Constant *C = nullptr; 8159 const DataLayout &DL = getDataLayout(); 8160 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8161 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8162 Operands[1], DL, &TLI); 8163 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 8164 if (!LI->isVolatile()) 8165 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8166 } else 8167 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8168 if (!C) return V; 8169 return getSCEV(C); 8170 } 8171 } 8172 } 8173 8174 // This is some other type of SCEVUnknown, just return it. 8175 return V; 8176 } 8177 8178 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8179 // Avoid performing the look-up in the common case where the specified 8180 // expression has no loop-variant portions. 8181 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8182 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8183 if (OpAtScope != Comm->getOperand(i)) { 8184 // Okay, at least one of these operands is loop variant but might be 8185 // foldable. Build a new instance of the folded commutative expression. 8186 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8187 Comm->op_begin()+i); 8188 NewOps.push_back(OpAtScope); 8189 8190 for (++i; i != e; ++i) { 8191 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8192 NewOps.push_back(OpAtScope); 8193 } 8194 if (isa<SCEVAddExpr>(Comm)) 8195 return getAddExpr(NewOps); 8196 if (isa<SCEVMulExpr>(Comm)) 8197 return getMulExpr(NewOps); 8198 if (isa<SCEVSMaxExpr>(Comm)) 8199 return getSMaxExpr(NewOps); 8200 if (isa<SCEVUMaxExpr>(Comm)) 8201 return getUMaxExpr(NewOps); 8202 llvm_unreachable("Unknown commutative SCEV type!"); 8203 } 8204 } 8205 // If we got here, all operands are loop invariant. 8206 return Comm; 8207 } 8208 8209 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8210 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8211 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8212 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8213 return Div; // must be loop invariant 8214 return getUDivExpr(LHS, RHS); 8215 } 8216 8217 // If this is a loop recurrence for a loop that does not contain L, then we 8218 // are dealing with the final value computed by the loop. 8219 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8220 // First, attempt to evaluate each operand. 8221 // Avoid performing the look-up in the common case where the specified 8222 // expression has no loop-variant portions. 8223 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8224 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8225 if (OpAtScope == AddRec->getOperand(i)) 8226 continue; 8227 8228 // Okay, at least one of these operands is loop variant but might be 8229 // foldable. Build a new instance of the folded commutative expression. 8230 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8231 AddRec->op_begin()+i); 8232 NewOps.push_back(OpAtScope); 8233 for (++i; i != e; ++i) 8234 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8235 8236 const SCEV *FoldedRec = 8237 getAddRecExpr(NewOps, AddRec->getLoop(), 8238 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8239 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8240 // The addrec may be folded to a nonrecurrence, for example, if the 8241 // induction variable is multiplied by zero after constant folding. Go 8242 // ahead and return the folded value. 8243 if (!AddRec) 8244 return FoldedRec; 8245 break; 8246 } 8247 8248 // If the scope is outside the addrec's loop, evaluate it by using the 8249 // loop exit value of the addrec. 8250 if (!AddRec->getLoop()->contains(L)) { 8251 // To evaluate this recurrence, we need to know how many times the AddRec 8252 // loop iterates. Compute this now. 8253 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8254 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8255 8256 // Then, evaluate the AddRec. 8257 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8258 } 8259 8260 return AddRec; 8261 } 8262 8263 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8264 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8265 if (Op == Cast->getOperand()) 8266 return Cast; // must be loop invariant 8267 return getZeroExtendExpr(Op, Cast->getType()); 8268 } 8269 8270 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8271 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8272 if (Op == Cast->getOperand()) 8273 return Cast; // must be loop invariant 8274 return getSignExtendExpr(Op, Cast->getType()); 8275 } 8276 8277 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8278 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8279 if (Op == Cast->getOperand()) 8280 return Cast; // must be loop invariant 8281 return getTruncateExpr(Op, Cast->getType()); 8282 } 8283 8284 llvm_unreachable("Unknown SCEV type!"); 8285 } 8286 8287 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8288 return getSCEVAtScope(getSCEV(V), L); 8289 } 8290 8291 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8292 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8293 return stripInjectiveFunctions(ZExt->getOperand()); 8294 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8295 return stripInjectiveFunctions(SExt->getOperand()); 8296 return S; 8297 } 8298 8299 /// Finds the minimum unsigned root of the following equation: 8300 /// 8301 /// A * X = B (mod N) 8302 /// 8303 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8304 /// A and B isn't important. 8305 /// 8306 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8307 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8308 ScalarEvolution &SE) { 8309 uint32_t BW = A.getBitWidth(); 8310 assert(BW == SE.getTypeSizeInBits(B->getType())); 8311 assert(A != 0 && "A must be non-zero."); 8312 8313 // 1. D = gcd(A, N) 8314 // 8315 // The gcd of A and N may have only one prime factor: 2. The number of 8316 // trailing zeros in A is its multiplicity 8317 uint32_t Mult2 = A.countTrailingZeros(); 8318 // D = 2^Mult2 8319 8320 // 2. Check if B is divisible by D. 8321 // 8322 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8323 // is not less than multiplicity of this prime factor for D. 8324 if (SE.GetMinTrailingZeros(B) < Mult2) 8325 return SE.getCouldNotCompute(); 8326 8327 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8328 // modulo (N / D). 8329 // 8330 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8331 // (N / D) in general. The inverse itself always fits into BW bits, though, 8332 // so we immediately truncate it. 8333 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8334 APInt Mod(BW + 1, 0); 8335 Mod.setBit(BW - Mult2); // Mod = N / D 8336 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8337 8338 // 4. Compute the minimum unsigned root of the equation: 8339 // I * (B / D) mod (N / D) 8340 // To simplify the computation, we factor out the divide by D: 8341 // (I * B mod N) / D 8342 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8343 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8344 } 8345 8346 /// For a given quadratic addrec, generate coefficients of the corresponding 8347 /// quadratic equation, multiplied by a common value to ensure that they are 8348 /// integers. 8349 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8350 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8351 /// were multiplied by, and BitWidth is the bit width of the original addrec 8352 /// coefficients. 8353 /// This function returns None if the addrec coefficients are not compile- 8354 /// time constants. 8355 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8356 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8357 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8358 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8359 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8360 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8361 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8362 << *AddRec << '\n'); 8363 8364 // We currently can only solve this if the coefficients are constants. 8365 if (!LC || !MC || !NC) { 8366 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8367 return None; 8368 } 8369 8370 APInt L = LC->getAPInt(); 8371 APInt M = MC->getAPInt(); 8372 APInt N = NC->getAPInt(); 8373 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8374 8375 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8376 unsigned NewWidth = BitWidth + 1; 8377 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8378 << BitWidth << '\n'); 8379 // The sign-extension (as opposed to a zero-extension) here matches the 8380 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8381 N = N.sext(NewWidth); 8382 M = M.sext(NewWidth); 8383 L = L.sext(NewWidth); 8384 8385 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8386 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8387 // L+M, L+2M+N, L+3M+3N, ... 8388 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8389 // 8390 // The equation Acc = 0 is then 8391 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8392 // In a quadratic form it becomes: 8393 // N n^2 + (2M-N) n + 2L = 0. 8394 8395 APInt A = N; 8396 APInt B = 2 * M - A; 8397 APInt C = 2 * L; 8398 APInt T = APInt(NewWidth, 2); 8399 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8400 << "x + " << C << ", coeff bw: " << NewWidth 8401 << ", multiplied by " << T << '\n'); 8402 return std::make_tuple(A, B, C, T, BitWidth); 8403 } 8404 8405 /// Helper function to compare optional APInts: 8406 /// (a) if X and Y both exist, return min(X, Y), 8407 /// (b) if neither X nor Y exist, return None, 8408 /// (c) if exactly one of X and Y exists, return that value. 8409 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8410 if (X.hasValue() && Y.hasValue()) { 8411 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8412 APInt XW = X->sextOrSelf(W); 8413 APInt YW = Y->sextOrSelf(W); 8414 return XW.slt(YW) ? *X : *Y; 8415 } 8416 if (!X.hasValue() && !Y.hasValue()) 8417 return None; 8418 return X.hasValue() ? *X : *Y; 8419 } 8420 8421 /// Helper function to truncate an optional APInt to a given BitWidth. 8422 /// When solving addrec-related equations, it is preferable to return a value 8423 /// that has the same bit width as the original addrec's coefficients. If the 8424 /// solution fits in the original bit width, truncate it (except for i1). 8425 /// Returning a value of a different bit width may inhibit some optimizations. 8426 /// 8427 /// In general, a solution to a quadratic equation generated from an addrec 8428 /// may require BW+1 bits, where BW is the bit width of the addrec's 8429 /// coefficients. The reason is that the coefficients of the quadratic 8430 /// equation are BW+1 bits wide (to avoid truncation when converting from 8431 /// the addrec to the equation). 8432 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8433 if (!X.hasValue()) 8434 return None; 8435 unsigned W = X->getBitWidth(); 8436 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8437 return X->trunc(BitWidth); 8438 return X; 8439 } 8440 8441 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8442 /// iterations. The values L, M, N are assumed to be signed, and they 8443 /// should all have the same bit widths. 8444 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8445 /// where BW is the bit width of the addrec's coefficients. 8446 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8447 /// returned as such, otherwise the bit width of the returned value may 8448 /// be greater than BW. 8449 /// 8450 /// This function returns None if 8451 /// (a) the addrec coefficients are not constant, or 8452 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8453 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8454 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8455 static Optional<APInt> 8456 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8457 APInt A, B, C, M; 8458 unsigned BitWidth; 8459 auto T = GetQuadraticEquation(AddRec); 8460 if (!T.hasValue()) 8461 return None; 8462 8463 std::tie(A, B, C, M, BitWidth) = *T; 8464 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8465 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8466 if (!X.hasValue()) 8467 return None; 8468 8469 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8470 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8471 if (!V->isZero()) 8472 return None; 8473 8474 return TruncIfPossible(X, BitWidth); 8475 } 8476 8477 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8478 /// iterations. The values M, N are assumed to be signed, and they 8479 /// should all have the same bit widths. 8480 /// Find the least n such that c(n) does not belong to the given range, 8481 /// while c(n-1) does. 8482 /// 8483 /// This function returns None if 8484 /// (a) the addrec coefficients are not constant, or 8485 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8486 /// bounds of the range. 8487 static Optional<APInt> 8488 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8489 const ConstantRange &Range, ScalarEvolution &SE) { 8490 assert(AddRec->getOperand(0)->isZero() && 8491 "Starting value of addrec should be 0"); 8492 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8493 << Range << ", addrec " << *AddRec << '\n'); 8494 // This case is handled in getNumIterationsInRange. Here we can assume that 8495 // we start in the range. 8496 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8497 "Addrec's initial value should be in range"); 8498 8499 APInt A, B, C, M; 8500 unsigned BitWidth; 8501 auto T = GetQuadraticEquation(AddRec); 8502 if (!T.hasValue()) 8503 return None; 8504 8505 // Be careful about the return value: there can be two reasons for not 8506 // returning an actual number. First, if no solutions to the equations 8507 // were found, and second, if the solutions don't leave the given range. 8508 // The first case means that the actual solution is "unknown", the second 8509 // means that it's known, but not valid. If the solution is unknown, we 8510 // cannot make any conclusions. 8511 // Return a pair: the optional solution and a flag indicating if the 8512 // solution was found. 8513 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8514 // Solve for signed overflow and unsigned overflow, pick the lower 8515 // solution. 8516 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8517 << Bound << " (before multiplying by " << M << ")\n"); 8518 Bound *= M; // The quadratic equation multiplier. 8519 8520 Optional<APInt> SO = None; 8521 if (BitWidth > 1) { 8522 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8523 "signed overflow\n"); 8524 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 8525 } 8526 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8527 "unsigned overflow\n"); 8528 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 8529 BitWidth+1); 8530 8531 auto LeavesRange = [&] (const APInt &X) { 8532 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 8533 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 8534 if (Range.contains(V0->getValue())) 8535 return false; 8536 // X should be at least 1, so X-1 is non-negative. 8537 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 8538 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 8539 if (Range.contains(V1->getValue())) 8540 return true; 8541 return false; 8542 }; 8543 8544 // If SolveQuadraticEquationWrap returns None, it means that there can 8545 // be a solution, but the function failed to find it. We cannot treat it 8546 // as "no solution". 8547 if (!SO.hasValue() || !UO.hasValue()) 8548 return { None, false }; 8549 8550 // Check the smaller value first to see if it leaves the range. 8551 // At this point, both SO and UO must have values. 8552 Optional<APInt> Min = MinOptional(SO, UO); 8553 if (LeavesRange(*Min)) 8554 return { Min, true }; 8555 Optional<APInt> Max = Min == SO ? UO : SO; 8556 if (LeavesRange(*Max)) 8557 return { Max, true }; 8558 8559 // Solutions were found, but were eliminated, hence the "true". 8560 return { None, true }; 8561 }; 8562 8563 std::tie(A, B, C, M, BitWidth) = *T; 8564 // Lower bound is inclusive, subtract 1 to represent the exiting value. 8565 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1; 8566 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth()); 8567 auto SL = SolveForBoundary(Lower); 8568 auto SU = SolveForBoundary(Upper); 8569 // If any of the solutions was unknown, no meaninigful conclusions can 8570 // be made. 8571 if (!SL.second || !SU.second) 8572 return None; 8573 8574 // Claim: The correct solution is not some value between Min and Max. 8575 // 8576 // Justification: Assuming that Min and Max are different values, one of 8577 // them is when the first signed overflow happens, the other is when the 8578 // first unsigned overflow happens. Crossing the range boundary is only 8579 // possible via an overflow (treating 0 as a special case of it, modeling 8580 // an overflow as crossing k*2^W for some k). 8581 // 8582 // The interesting case here is when Min was eliminated as an invalid 8583 // solution, but Max was not. The argument is that if there was another 8584 // overflow between Min and Max, it would also have been eliminated if 8585 // it was considered. 8586 // 8587 // For a given boundary, it is possible to have two overflows of the same 8588 // type (signed/unsigned) without having the other type in between: this 8589 // can happen when the vertex of the parabola is between the iterations 8590 // corresponding to the overflows. This is only possible when the two 8591 // overflows cross k*2^W for the same k. In such case, if the second one 8592 // left the range (and was the first one to do so), the first overflow 8593 // would have to enter the range, which would mean that either we had left 8594 // the range before or that we started outside of it. Both of these cases 8595 // are contradictions. 8596 // 8597 // Claim: In the case where SolveForBoundary returns None, the correct 8598 // solution is not some value between the Max for this boundary and the 8599 // Min of the other boundary. 8600 // 8601 // Justification: Assume that we had such Max_A and Min_B corresponding 8602 // to range boundaries A and B and such that Max_A < Min_B. If there was 8603 // a solution between Max_A and Min_B, it would have to be caused by an 8604 // overflow corresponding to either A or B. It cannot correspond to B, 8605 // since Min_B is the first occurrence of such an overflow. If it 8606 // corresponded to A, it would have to be either a signed or an unsigned 8607 // overflow that is larger than both eliminated overflows for A. But 8608 // between the eliminated overflows and this overflow, the values would 8609 // cover the entire value space, thus crossing the other boundary, which 8610 // is a contradiction. 8611 8612 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); 8613 } 8614 8615 ScalarEvolution::ExitLimit 8616 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, 8617 bool AllowPredicates) { 8618 8619 // This is only used for loops with a "x != y" exit test. The exit condition 8620 // is now expressed as a single expression, V = x-y. So the exit test is 8621 // effectively V != 0. We know and take advantage of the fact that this 8622 // expression only being used in a comparison by zero context. 8623 8624 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 8625 // If the value is a constant 8626 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8627 // If the value is already zero, the branch will execute zero times. 8628 if (C->getValue()->isZero()) return C; 8629 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8630 } 8631 8632 const SCEVAddRecExpr *AddRec = 8633 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 8634 8635 if (!AddRec && AllowPredicates) 8636 // Try to make this an AddRec using runtime tests, in the first X 8637 // iterations of this loop, where X is the SCEV expression found by the 8638 // algorithm below. 8639 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 8640 8641 if (!AddRec || AddRec->getLoop() != L) 8642 return getCouldNotCompute(); 8643 8644 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 8645 // the quadratic equation to solve it. 8646 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8647 // We can only use this value if the chrec ends up with an exact zero 8648 // value at this index. When solving for "X*X != 5", for example, we 8649 // should not accept a root of 2. 8650 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 8651 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 8652 return ExitLimit(R, R, false, Predicates); 8653 } 8654 return getCouldNotCompute(); 8655 } 8656 8657 // Otherwise we can only handle this if it is affine. 8658 if (!AddRec->isAffine()) 8659 return getCouldNotCompute(); 8660 8661 // If this is an affine expression, the execution count of this branch is 8662 // the minimum unsigned root of the following equation: 8663 // 8664 // Start + Step*N = 0 (mod 2^BW) 8665 // 8666 // equivalent to: 8667 // 8668 // Step*N = -Start (mod 2^BW) 8669 // 8670 // where BW is the common bit width of Start and Step. 8671 8672 // Get the initial value for the loop. 8673 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8674 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8675 8676 // For now we handle only constant steps. 8677 // 8678 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8679 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8680 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8681 // We have not yet seen any such cases. 8682 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8683 if (!StepC || StepC->getValue()->isZero()) 8684 return getCouldNotCompute(); 8685 8686 // For positive steps (counting up until unsigned overflow): 8687 // N = -Start/Step (as unsigned) 8688 // For negative steps (counting down to zero): 8689 // N = Start/-Step 8690 // First compute the unsigned distance from zero in the direction of Step. 8691 bool CountDown = StepC->getAPInt().isNegative(); 8692 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8693 8694 // Handle unitary steps, which cannot wraparound. 8695 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8696 // N = Distance (as unsigned) 8697 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8698 APInt MaxBECount = getUnsignedRangeMax(Distance); 8699 8700 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8701 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8702 // case, and see if we can improve the bound. 8703 // 8704 // Explicitly handling this here is necessary because getUnsignedRange 8705 // isn't context-sensitive; it doesn't know that we only care about the 8706 // range inside the loop. 8707 const SCEV *Zero = getZero(Distance->getType()); 8708 const SCEV *One = getOne(Distance->getType()); 8709 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8710 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8711 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8712 // as "unsigned_max(Distance + 1) - 1". 8713 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8714 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8715 } 8716 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8717 } 8718 8719 // If the condition controls loop exit (the loop exits only if the expression 8720 // is true) and the addition is no-wrap we can use unsigned divide to 8721 // compute the backedge count. In this case, the step may not divide the 8722 // distance, but we don't care because if the condition is "missed" the loop 8723 // will have undefined behavior due to wrapping. 8724 if (ControlsExit && AddRec->hasNoSelfWrap() && 8725 loopHasNoAbnormalExits(AddRec->getLoop())) { 8726 const SCEV *Exact = 8727 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8728 const SCEV *Max = 8729 Exact == getCouldNotCompute() 8730 ? Exact 8731 : getConstant(getUnsignedRangeMax(Exact)); 8732 return ExitLimit(Exact, Max, false, Predicates); 8733 } 8734 8735 // Solve the general equation. 8736 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8737 getNegativeSCEV(Start), *this); 8738 const SCEV *M = E == getCouldNotCompute() 8739 ? E 8740 : getConstant(getUnsignedRangeMax(E)); 8741 return ExitLimit(E, M, false, Predicates); 8742 } 8743 8744 ScalarEvolution::ExitLimit 8745 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8746 // Loops that look like: while (X == 0) are very strange indeed. We don't 8747 // handle them yet except for the trivial case. This could be expanded in the 8748 // future as needed. 8749 8750 // If the value is a constant, check to see if it is known to be non-zero 8751 // already. If so, the backedge will execute zero times. 8752 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8753 if (!C->getValue()->isZero()) 8754 return getZero(C->getType()); 8755 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8756 } 8757 8758 // We could implement others, but I really doubt anyone writes loops like 8759 // this, and if they did, they would already be constant folded. 8760 return getCouldNotCompute(); 8761 } 8762 8763 std::pair<BasicBlock *, BasicBlock *> 8764 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8765 // If the block has a unique predecessor, then there is no path from the 8766 // predecessor to the block that does not go through the direct edge 8767 // from the predecessor to the block. 8768 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8769 return {Pred, BB}; 8770 8771 // A loop's header is defined to be a block that dominates the loop. 8772 // If the header has a unique predecessor outside the loop, it must be 8773 // a block that has exactly one successor that can reach the loop. 8774 if (Loop *L = LI.getLoopFor(BB)) 8775 return {L->getLoopPredecessor(), L->getHeader()}; 8776 8777 return {nullptr, nullptr}; 8778 } 8779 8780 /// SCEV structural equivalence is usually sufficient for testing whether two 8781 /// expressions are equal, however for the purposes of looking for a condition 8782 /// guarding a loop, it can be useful to be a little more general, since a 8783 /// front-end may have replicated the controlling expression. 8784 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8785 // Quick check to see if they are the same SCEV. 8786 if (A == B) return true; 8787 8788 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8789 // Not all instructions that are "identical" compute the same value. For 8790 // instance, two distinct alloca instructions allocating the same type are 8791 // identical and do not read memory; but compute distinct values. 8792 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8793 }; 8794 8795 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8796 // two different instructions with the same value. Check for this case. 8797 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8798 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8799 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8800 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8801 if (ComputesEqualValues(AI, BI)) 8802 return true; 8803 8804 // Otherwise assume they may have a different value. 8805 return false; 8806 } 8807 8808 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8809 const SCEV *&LHS, const SCEV *&RHS, 8810 unsigned Depth) { 8811 bool Changed = false; 8812 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 8813 // '0 != 0'. 8814 auto TrivialCase = [&](bool TriviallyTrue) { 8815 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8816 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 8817 return true; 8818 }; 8819 // If we hit the max recursion limit bail out. 8820 if (Depth >= 3) 8821 return false; 8822 8823 // Canonicalize a constant to the right side. 8824 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8825 // Check for both operands constant. 8826 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8827 if (ConstantExpr::getICmp(Pred, 8828 LHSC->getValue(), 8829 RHSC->getValue())->isNullValue()) 8830 return TrivialCase(false); 8831 else 8832 return TrivialCase(true); 8833 } 8834 // Otherwise swap the operands to put the constant on the right. 8835 std::swap(LHS, RHS); 8836 Pred = ICmpInst::getSwappedPredicate(Pred); 8837 Changed = true; 8838 } 8839 8840 // If we're comparing an addrec with a value which is loop-invariant in the 8841 // addrec's loop, put the addrec on the left. Also make a dominance check, 8842 // as both operands could be addrecs loop-invariant in each other's loop. 8843 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8844 const Loop *L = AR->getLoop(); 8845 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8846 std::swap(LHS, RHS); 8847 Pred = ICmpInst::getSwappedPredicate(Pred); 8848 Changed = true; 8849 } 8850 } 8851 8852 // If there's a constant operand, canonicalize comparisons with boundary 8853 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8854 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8855 const APInt &RA = RC->getAPInt(); 8856 8857 bool SimplifiedByConstantRange = false; 8858 8859 if (!ICmpInst::isEquality(Pred)) { 8860 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8861 if (ExactCR.isFullSet()) 8862 return TrivialCase(true); 8863 else if (ExactCR.isEmptySet()) 8864 return TrivialCase(false); 8865 8866 APInt NewRHS; 8867 CmpInst::Predicate NewPred; 8868 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8869 ICmpInst::isEquality(NewPred)) { 8870 // We were able to convert an inequality to an equality. 8871 Pred = NewPred; 8872 RHS = getConstant(NewRHS); 8873 Changed = SimplifiedByConstantRange = true; 8874 } 8875 } 8876 8877 if (!SimplifiedByConstantRange) { 8878 switch (Pred) { 8879 default: 8880 break; 8881 case ICmpInst::ICMP_EQ: 8882 case ICmpInst::ICMP_NE: 8883 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8884 if (!RA) 8885 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8886 if (const SCEVMulExpr *ME = 8887 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8888 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8889 ME->getOperand(0)->isAllOnesValue()) { 8890 RHS = AE->getOperand(1); 8891 LHS = ME->getOperand(1); 8892 Changed = true; 8893 } 8894 break; 8895 8896 8897 // The "Should have been caught earlier!" messages refer to the fact 8898 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8899 // should have fired on the corresponding cases, and canonicalized the 8900 // check to trivial case. 8901 8902 case ICmpInst::ICMP_UGE: 8903 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8904 Pred = ICmpInst::ICMP_UGT; 8905 RHS = getConstant(RA - 1); 8906 Changed = true; 8907 break; 8908 case ICmpInst::ICMP_ULE: 8909 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8910 Pred = ICmpInst::ICMP_ULT; 8911 RHS = getConstant(RA + 1); 8912 Changed = true; 8913 break; 8914 case ICmpInst::ICMP_SGE: 8915 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8916 Pred = ICmpInst::ICMP_SGT; 8917 RHS = getConstant(RA - 1); 8918 Changed = true; 8919 break; 8920 case ICmpInst::ICMP_SLE: 8921 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8922 Pred = ICmpInst::ICMP_SLT; 8923 RHS = getConstant(RA + 1); 8924 Changed = true; 8925 break; 8926 } 8927 } 8928 } 8929 8930 // Check for obvious equality. 8931 if (HasSameValue(LHS, RHS)) { 8932 if (ICmpInst::isTrueWhenEqual(Pred)) 8933 return TrivialCase(true); 8934 if (ICmpInst::isFalseWhenEqual(Pred)) 8935 return TrivialCase(false); 8936 } 8937 8938 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8939 // adding or subtracting 1 from one of the operands. 8940 switch (Pred) { 8941 case ICmpInst::ICMP_SLE: 8942 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8943 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8944 SCEV::FlagNSW); 8945 Pred = ICmpInst::ICMP_SLT; 8946 Changed = true; 8947 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8948 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8949 SCEV::FlagNSW); 8950 Pred = ICmpInst::ICMP_SLT; 8951 Changed = true; 8952 } 8953 break; 8954 case ICmpInst::ICMP_SGE: 8955 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8956 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8957 SCEV::FlagNSW); 8958 Pred = ICmpInst::ICMP_SGT; 8959 Changed = true; 8960 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8961 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8962 SCEV::FlagNSW); 8963 Pred = ICmpInst::ICMP_SGT; 8964 Changed = true; 8965 } 8966 break; 8967 case ICmpInst::ICMP_ULE: 8968 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8969 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8970 SCEV::FlagNUW); 8971 Pred = ICmpInst::ICMP_ULT; 8972 Changed = true; 8973 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8974 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8975 Pred = ICmpInst::ICMP_ULT; 8976 Changed = true; 8977 } 8978 break; 8979 case ICmpInst::ICMP_UGE: 8980 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8981 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8982 Pred = ICmpInst::ICMP_UGT; 8983 Changed = true; 8984 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 8985 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8986 SCEV::FlagNUW); 8987 Pred = ICmpInst::ICMP_UGT; 8988 Changed = true; 8989 } 8990 break; 8991 default: 8992 break; 8993 } 8994 8995 // TODO: More simplifications are possible here. 8996 8997 // Recursively simplify until we either hit a recursion limit or nothing 8998 // changes. 8999 if (Changed) 9000 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9001 9002 return Changed; 9003 } 9004 9005 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9006 return getSignedRangeMax(S).isNegative(); 9007 } 9008 9009 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9010 return getSignedRangeMin(S).isStrictlyPositive(); 9011 } 9012 9013 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9014 return !getSignedRangeMin(S).isNegative(); 9015 } 9016 9017 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9018 return !getSignedRangeMax(S).isStrictlyPositive(); 9019 } 9020 9021 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9022 return isKnownNegative(S) || isKnownPositive(S); 9023 } 9024 9025 std::pair<const SCEV *, const SCEV *> 9026 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9027 // Compute SCEV on entry of loop L. 9028 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9029 if (Start == getCouldNotCompute()) 9030 return { Start, Start }; 9031 // Compute post increment SCEV for loop L. 9032 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9033 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9034 return { Start, PostInc }; 9035 } 9036 9037 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9038 const SCEV *LHS, const SCEV *RHS) { 9039 // First collect all loops. 9040 SmallPtrSet<const Loop *, 8> LoopsUsed; 9041 getUsedLoops(LHS, LoopsUsed); 9042 getUsedLoops(RHS, LoopsUsed); 9043 9044 if (LoopsUsed.empty()) 9045 return false; 9046 9047 // Domination relationship must be a linear order on collected loops. 9048 #ifndef NDEBUG 9049 for (auto *L1 : LoopsUsed) 9050 for (auto *L2 : LoopsUsed) 9051 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9052 DT.dominates(L2->getHeader(), L1->getHeader())) && 9053 "Domination relationship is not a linear order"); 9054 #endif 9055 9056 const Loop *MDL = 9057 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9058 [&](const Loop *L1, const Loop *L2) { 9059 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9060 }); 9061 9062 // Get init and post increment value for LHS. 9063 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9064 // if LHS contains unknown non-invariant SCEV then bail out. 9065 if (SplitLHS.first == getCouldNotCompute()) 9066 return false; 9067 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9068 // Get init and post increment value for RHS. 9069 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9070 // if RHS contains unknown non-invariant SCEV then bail out. 9071 if (SplitRHS.first == getCouldNotCompute()) 9072 return false; 9073 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9074 // It is possible that init SCEV contains an invariant load but it does 9075 // not dominate MDL and is not available at MDL loop entry, so we should 9076 // check it here. 9077 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9078 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9079 return false; 9080 9081 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) && 9082 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9083 SplitRHS.second); 9084 } 9085 9086 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9087 const SCEV *LHS, const SCEV *RHS) { 9088 // Canonicalize the inputs first. 9089 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9090 9091 if (isKnownViaInduction(Pred, LHS, RHS)) 9092 return true; 9093 9094 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9095 return true; 9096 9097 // Otherwise see what can be done with some simple reasoning. 9098 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9099 } 9100 9101 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9102 const SCEVAddRecExpr *LHS, 9103 const SCEV *RHS) { 9104 const Loop *L = LHS->getLoop(); 9105 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9106 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9107 } 9108 9109 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9110 ICmpInst::Predicate Pred, 9111 bool &Increasing) { 9112 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9113 9114 #ifndef NDEBUG 9115 // Verify an invariant: inverting the predicate should turn a monotonically 9116 // increasing change to a monotonically decreasing one, and vice versa. 9117 bool IncreasingSwapped; 9118 bool ResultSwapped = isMonotonicPredicateImpl( 9119 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9120 9121 assert(Result == ResultSwapped && "should be able to analyze both!"); 9122 if (ResultSwapped) 9123 assert(Increasing == !IncreasingSwapped && 9124 "monotonicity should flip as we flip the predicate"); 9125 #endif 9126 9127 return Result; 9128 } 9129 9130 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9131 ICmpInst::Predicate Pred, 9132 bool &Increasing) { 9133 9134 // A zero step value for LHS means the induction variable is essentially a 9135 // loop invariant value. We don't really depend on the predicate actually 9136 // flipping from false to true (for increasing predicates, and the other way 9137 // around for decreasing predicates), all we care about is that *if* the 9138 // predicate changes then it only changes from false to true. 9139 // 9140 // A zero step value in itself is not very useful, but there may be places 9141 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9142 // as general as possible. 9143 9144 switch (Pred) { 9145 default: 9146 return false; // Conservative answer 9147 9148 case ICmpInst::ICMP_UGT: 9149 case ICmpInst::ICMP_UGE: 9150 case ICmpInst::ICMP_ULT: 9151 case ICmpInst::ICMP_ULE: 9152 if (!LHS->hasNoUnsignedWrap()) 9153 return false; 9154 9155 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9156 return true; 9157 9158 case ICmpInst::ICMP_SGT: 9159 case ICmpInst::ICMP_SGE: 9160 case ICmpInst::ICMP_SLT: 9161 case ICmpInst::ICMP_SLE: { 9162 if (!LHS->hasNoSignedWrap()) 9163 return false; 9164 9165 const SCEV *Step = LHS->getStepRecurrence(*this); 9166 9167 if (isKnownNonNegative(Step)) { 9168 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9169 return true; 9170 } 9171 9172 if (isKnownNonPositive(Step)) { 9173 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9174 return true; 9175 } 9176 9177 return false; 9178 } 9179 9180 } 9181 9182 llvm_unreachable("switch has default clause!"); 9183 } 9184 9185 bool ScalarEvolution::isLoopInvariantPredicate( 9186 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9187 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9188 const SCEV *&InvariantRHS) { 9189 9190 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 9191 if (!isLoopInvariant(RHS, L)) { 9192 if (!isLoopInvariant(LHS, L)) 9193 return false; 9194 9195 std::swap(LHS, RHS); 9196 Pred = ICmpInst::getSwappedPredicate(Pred); 9197 } 9198 9199 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9200 if (!ArLHS || ArLHS->getLoop() != L) 9201 return false; 9202 9203 bool Increasing; 9204 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9205 return false; 9206 9207 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9208 // true as the loop iterates, and the backedge is control dependent on 9209 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9210 // 9211 // * if the predicate was false in the first iteration then the predicate 9212 // is never evaluated again, since the loop exits without taking the 9213 // backedge. 9214 // * if the predicate was true in the first iteration then it will 9215 // continue to be true for all future iterations since it is 9216 // monotonically increasing. 9217 // 9218 // For both the above possibilities, we can replace the loop varying 9219 // predicate with its value on the first iteration of the loop (which is 9220 // loop invariant). 9221 // 9222 // A similar reasoning applies for a monotonically decreasing predicate, by 9223 // replacing true with false and false with true in the above two bullets. 9224 9225 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9226 9227 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9228 return false; 9229 9230 InvariantPred = Pred; 9231 InvariantLHS = ArLHS->getStart(); 9232 InvariantRHS = RHS; 9233 return true; 9234 } 9235 9236 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9237 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9238 if (HasSameValue(LHS, RHS)) 9239 return ICmpInst::isTrueWhenEqual(Pred); 9240 9241 // This code is split out from isKnownPredicate because it is called from 9242 // within isLoopEntryGuardedByCond. 9243 9244 auto CheckRanges = 9245 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9246 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9247 .contains(RangeLHS); 9248 }; 9249 9250 // The check at the top of the function catches the case where the values are 9251 // known to be equal. 9252 if (Pred == CmpInst::ICMP_EQ) 9253 return false; 9254 9255 if (Pred == CmpInst::ICMP_NE) 9256 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9257 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9258 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9259 9260 if (CmpInst::isSigned(Pred)) 9261 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9262 9263 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9264 } 9265 9266 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9267 const SCEV *LHS, 9268 const SCEV *RHS) { 9269 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9270 // Return Y via OutY. 9271 auto MatchBinaryAddToConst = 9272 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9273 SCEV::NoWrapFlags ExpectedFlags) { 9274 const SCEV *NonConstOp, *ConstOp; 9275 SCEV::NoWrapFlags FlagsPresent; 9276 9277 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9278 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9279 return false; 9280 9281 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9282 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9283 }; 9284 9285 APInt C; 9286 9287 switch (Pred) { 9288 default: 9289 break; 9290 9291 case ICmpInst::ICMP_SGE: 9292 std::swap(LHS, RHS); 9293 LLVM_FALLTHROUGH; 9294 case ICmpInst::ICMP_SLE: 9295 // X s<= (X + C)<nsw> if C >= 0 9296 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9297 return true; 9298 9299 // (X + C)<nsw> s<= X if C <= 0 9300 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9301 !C.isStrictlyPositive()) 9302 return true; 9303 break; 9304 9305 case ICmpInst::ICMP_SGT: 9306 std::swap(LHS, RHS); 9307 LLVM_FALLTHROUGH; 9308 case ICmpInst::ICMP_SLT: 9309 // X s< (X + C)<nsw> if C > 0 9310 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9311 C.isStrictlyPositive()) 9312 return true; 9313 9314 // (X + C)<nsw> s< X if C < 0 9315 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9316 return true; 9317 break; 9318 } 9319 9320 return false; 9321 } 9322 9323 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9324 const SCEV *LHS, 9325 const SCEV *RHS) { 9326 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9327 return false; 9328 9329 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9330 // the stack can result in exponential time complexity. 9331 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9332 9333 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9334 // 9335 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9336 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9337 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9338 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9339 // use isKnownPredicate later if needed. 9340 return isKnownNonNegative(RHS) && 9341 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9342 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9343 } 9344 9345 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB, 9346 ICmpInst::Predicate Pred, 9347 const SCEV *LHS, const SCEV *RHS) { 9348 // No need to even try if we know the module has no guards. 9349 if (!HasGuards) 9350 return false; 9351 9352 return any_of(*BB, [&](Instruction &I) { 9353 using namespace llvm::PatternMatch; 9354 9355 Value *Condition; 9356 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 9357 m_Value(Condition))) && 9358 isImpliedCond(Pred, LHS, RHS, Condition, false); 9359 }); 9360 } 9361 9362 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 9363 /// protected by a conditional between LHS and RHS. This is used to 9364 /// to eliminate casts. 9365 bool 9366 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 9367 ICmpInst::Predicate Pred, 9368 const SCEV *LHS, const SCEV *RHS) { 9369 // Interpret a null as meaning no loop, where there is obviously no guard 9370 // (interprocedural conditions notwithstanding). 9371 if (!L) return true; 9372 9373 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9374 return true; 9375 9376 BasicBlock *Latch = L->getLoopLatch(); 9377 if (!Latch) 9378 return false; 9379 9380 BranchInst *LoopContinuePredicate = 9381 dyn_cast<BranchInst>(Latch->getTerminator()); 9382 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 9383 isImpliedCond(Pred, LHS, RHS, 9384 LoopContinuePredicate->getCondition(), 9385 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 9386 return true; 9387 9388 // We don't want more than one activation of the following loops on the stack 9389 // -- that can lead to O(n!) time complexity. 9390 if (WalkingBEDominatingConds) 9391 return false; 9392 9393 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 9394 9395 // See if we can exploit a trip count to prove the predicate. 9396 const auto &BETakenInfo = getBackedgeTakenInfo(L); 9397 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 9398 if (LatchBECount != getCouldNotCompute()) { 9399 // We know that Latch branches back to the loop header exactly 9400 // LatchBECount times. This means the backdege condition at Latch is 9401 // equivalent to "{0,+,1} u< LatchBECount". 9402 Type *Ty = LatchBECount->getType(); 9403 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 9404 const SCEV *LoopCounter = 9405 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 9406 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 9407 LatchBECount)) 9408 return true; 9409 } 9410 9411 // Check conditions due to any @llvm.assume intrinsics. 9412 for (auto &AssumeVH : AC.assumptions()) { 9413 if (!AssumeVH) 9414 continue; 9415 auto *CI = cast<CallInst>(AssumeVH); 9416 if (!DT.dominates(CI, Latch->getTerminator())) 9417 continue; 9418 9419 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 9420 return true; 9421 } 9422 9423 // If the loop is not reachable from the entry block, we risk running into an 9424 // infinite loop as we walk up into the dom tree. These loops do not matter 9425 // anyway, so we just return a conservative answer when we see them. 9426 if (!DT.isReachableFromEntry(L->getHeader())) 9427 return false; 9428 9429 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 9430 return true; 9431 9432 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 9433 DTN != HeaderDTN; DTN = DTN->getIDom()) { 9434 assert(DTN && "should reach the loop header before reaching the root!"); 9435 9436 BasicBlock *BB = DTN->getBlock(); 9437 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 9438 return true; 9439 9440 BasicBlock *PBB = BB->getSinglePredecessor(); 9441 if (!PBB) 9442 continue; 9443 9444 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 9445 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 9446 continue; 9447 9448 Value *Condition = ContinuePredicate->getCondition(); 9449 9450 // If we have an edge `E` within the loop body that dominates the only 9451 // latch, the condition guarding `E` also guards the backedge. This 9452 // reasoning works only for loops with a single latch. 9453 9454 BasicBlockEdge DominatingEdge(PBB, BB); 9455 if (DominatingEdge.isSingleEdge()) { 9456 // We're constructively (and conservatively) enumerating edges within the 9457 // loop body that dominate the latch. The dominator tree better agree 9458 // with us on this: 9459 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9460 9461 if (isImpliedCond(Pred, LHS, RHS, Condition, 9462 BB != ContinuePredicate->getSuccessor(0))) 9463 return true; 9464 } 9465 } 9466 9467 return false; 9468 } 9469 9470 bool 9471 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9472 ICmpInst::Predicate Pred, 9473 const SCEV *LHS, const SCEV *RHS) { 9474 // Interpret a null as meaning no loop, where there is obviously no guard 9475 // (interprocedural conditions notwithstanding). 9476 if (!L) return false; 9477 9478 // Both LHS and RHS must be available at loop entry. 9479 assert(isAvailableAtLoopEntry(LHS, L) && 9480 "LHS is not available at Loop Entry"); 9481 assert(isAvailableAtLoopEntry(RHS, L) && 9482 "RHS is not available at Loop Entry"); 9483 9484 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9485 return true; 9486 9487 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9488 // the facts (a >= b && a != b) separately. A typical situation is when the 9489 // non-strict comparison is known from ranges and non-equality is known from 9490 // dominating predicates. If we are proving strict comparison, we always try 9491 // to prove non-equality and non-strict comparison separately. 9492 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9493 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9494 bool ProvedNonStrictComparison = false; 9495 bool ProvedNonEquality = false; 9496 9497 if (ProvingStrictComparison) { 9498 ProvedNonStrictComparison = 9499 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9500 ProvedNonEquality = 9501 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9502 if (ProvedNonStrictComparison && ProvedNonEquality) 9503 return true; 9504 } 9505 9506 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9507 auto ProveViaGuard = [&](BasicBlock *Block) { 9508 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9509 return true; 9510 if (ProvingStrictComparison) { 9511 if (!ProvedNonStrictComparison) 9512 ProvedNonStrictComparison = 9513 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9514 if (!ProvedNonEquality) 9515 ProvedNonEquality = 9516 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9517 if (ProvedNonStrictComparison && ProvedNonEquality) 9518 return true; 9519 } 9520 return false; 9521 }; 9522 9523 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9524 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9525 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9526 return true; 9527 if (ProvingStrictComparison) { 9528 if (!ProvedNonStrictComparison) 9529 ProvedNonStrictComparison = 9530 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9531 if (!ProvedNonEquality) 9532 ProvedNonEquality = 9533 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9534 if (ProvedNonStrictComparison && ProvedNonEquality) 9535 return true; 9536 } 9537 return false; 9538 }; 9539 9540 // Starting at the loop predecessor, climb up the predecessor chain, as long 9541 // as there are predecessors that can be found that have unique successors 9542 // leading to the original header. 9543 for (std::pair<BasicBlock *, BasicBlock *> 9544 Pair(L->getLoopPredecessor(), L->getHeader()); 9545 Pair.first; 9546 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 9547 9548 if (ProveViaGuard(Pair.first)) 9549 return true; 9550 9551 BranchInst *LoopEntryPredicate = 9552 dyn_cast<BranchInst>(Pair.first->getTerminator()); 9553 if (!LoopEntryPredicate || 9554 LoopEntryPredicate->isUnconditional()) 9555 continue; 9556 9557 if (ProveViaCond(LoopEntryPredicate->getCondition(), 9558 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 9559 return true; 9560 } 9561 9562 // Check conditions due to any @llvm.assume intrinsics. 9563 for (auto &AssumeVH : AC.assumptions()) { 9564 if (!AssumeVH) 9565 continue; 9566 auto *CI = cast<CallInst>(AssumeVH); 9567 if (!DT.dominates(CI, L->getHeader())) 9568 continue; 9569 9570 if (ProveViaCond(CI->getArgOperand(0), false)) 9571 return true; 9572 } 9573 9574 return false; 9575 } 9576 9577 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 9578 const SCEV *LHS, const SCEV *RHS, 9579 Value *FoundCondValue, 9580 bool Inverse) { 9581 if (!PendingLoopPredicates.insert(FoundCondValue).second) 9582 return false; 9583 9584 auto ClearOnExit = 9585 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 9586 9587 // Recursively handle And and Or conditions. 9588 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 9589 if (BO->getOpcode() == Instruction::And) { 9590 if (!Inverse) 9591 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 9592 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 9593 } else if (BO->getOpcode() == Instruction::Or) { 9594 if (Inverse) 9595 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 9596 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 9597 } 9598 } 9599 9600 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 9601 if (!ICI) return false; 9602 9603 // Now that we found a conditional branch that dominates the loop or controls 9604 // the loop latch. Check to see if it is the comparison we are looking for. 9605 ICmpInst::Predicate FoundPred; 9606 if (Inverse) 9607 FoundPred = ICI->getInversePredicate(); 9608 else 9609 FoundPred = ICI->getPredicate(); 9610 9611 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 9612 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 9613 9614 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS); 9615 } 9616 9617 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 9618 const SCEV *RHS, 9619 ICmpInst::Predicate FoundPred, 9620 const SCEV *FoundLHS, 9621 const SCEV *FoundRHS) { 9622 // Balance the types. 9623 if (getTypeSizeInBits(LHS->getType()) < 9624 getTypeSizeInBits(FoundLHS->getType())) { 9625 if (CmpInst::isSigned(Pred)) { 9626 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9627 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9628 } else { 9629 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9630 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9631 } 9632 } else if (getTypeSizeInBits(LHS->getType()) > 9633 getTypeSizeInBits(FoundLHS->getType())) { 9634 if (CmpInst::isSigned(FoundPred)) { 9635 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9636 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9637 } else { 9638 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9639 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9640 } 9641 } 9642 9643 // Canonicalize the query to match the way instcombine will have 9644 // canonicalized the comparison. 9645 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9646 if (LHS == RHS) 9647 return CmpInst::isTrueWhenEqual(Pred); 9648 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9649 if (FoundLHS == FoundRHS) 9650 return CmpInst::isFalseWhenEqual(FoundPred); 9651 9652 // Check to see if we can make the LHS or RHS match. 9653 if (LHS == FoundRHS || RHS == FoundLHS) { 9654 if (isa<SCEVConstant>(RHS)) { 9655 std::swap(FoundLHS, FoundRHS); 9656 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9657 } else { 9658 std::swap(LHS, RHS); 9659 Pred = ICmpInst::getSwappedPredicate(Pred); 9660 } 9661 } 9662 9663 // Check whether the found predicate is the same as the desired predicate. 9664 if (FoundPred == Pred) 9665 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9666 9667 // Check whether swapping the found predicate makes it the same as the 9668 // desired predicate. 9669 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9670 if (isa<SCEVConstant>(RHS)) 9671 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 9672 else 9673 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 9674 RHS, LHS, FoundLHS, FoundRHS); 9675 } 9676 9677 // Unsigned comparison is the same as signed comparison when both the operands 9678 // are non-negative. 9679 if (CmpInst::isUnsigned(FoundPred) && 9680 CmpInst::getSignedPredicate(FoundPred) == Pred && 9681 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9682 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9683 9684 // Check if we can make progress by sharpening ranges. 9685 if (FoundPred == ICmpInst::ICMP_NE && 9686 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9687 9688 const SCEVConstant *C = nullptr; 9689 const SCEV *V = nullptr; 9690 9691 if (isa<SCEVConstant>(FoundLHS)) { 9692 C = cast<SCEVConstant>(FoundLHS); 9693 V = FoundRHS; 9694 } else { 9695 C = cast<SCEVConstant>(FoundRHS); 9696 V = FoundLHS; 9697 } 9698 9699 // The guarding predicate tells us that C != V. If the known range 9700 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9701 // range we consider has to correspond to same signedness as the 9702 // predicate we're interested in folding. 9703 9704 APInt Min = ICmpInst::isSigned(Pred) ? 9705 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9706 9707 if (Min == C->getAPInt()) { 9708 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9709 // This is true even if (Min + 1) wraps around -- in case of 9710 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 9711 9712 APInt SharperMin = Min + 1; 9713 9714 switch (Pred) { 9715 case ICmpInst::ICMP_SGE: 9716 case ICmpInst::ICMP_UGE: 9717 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9718 // RHS, we're done. 9719 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9720 getConstant(SharperMin))) 9721 return true; 9722 LLVM_FALLTHROUGH; 9723 9724 case ICmpInst::ICMP_SGT: 9725 case ICmpInst::ICMP_UGT: 9726 // We know from the range information that (V `Pred` Min || 9727 // V == Min). We know from the guarding condition that !(V 9728 // == Min). This gives us 9729 // 9730 // V `Pred` Min || V == Min && !(V == Min) 9731 // => V `Pred` Min 9732 // 9733 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9734 9735 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9736 return true; 9737 LLVM_FALLTHROUGH; 9738 9739 default: 9740 // No change 9741 break; 9742 } 9743 } 9744 } 9745 9746 // Check whether the actual condition is beyond sufficient. 9747 if (FoundPred == ICmpInst::ICMP_EQ) 9748 if (ICmpInst::isTrueWhenEqual(Pred)) 9749 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9750 return true; 9751 if (Pred == ICmpInst::ICMP_NE) 9752 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9753 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9754 return true; 9755 9756 // Otherwise assume the worst. 9757 return false; 9758 } 9759 9760 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9761 const SCEV *&L, const SCEV *&R, 9762 SCEV::NoWrapFlags &Flags) { 9763 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9764 if (!AE || AE->getNumOperands() != 2) 9765 return false; 9766 9767 L = AE->getOperand(0); 9768 R = AE->getOperand(1); 9769 Flags = AE->getNoWrapFlags(); 9770 return true; 9771 } 9772 9773 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9774 const SCEV *Less) { 9775 // We avoid subtracting expressions here because this function is usually 9776 // fairly deep in the call stack (i.e. is called many times). 9777 9778 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9779 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9780 const auto *MAR = cast<SCEVAddRecExpr>(More); 9781 9782 if (LAR->getLoop() != MAR->getLoop()) 9783 return None; 9784 9785 // We look at affine expressions only; not for correctness but to keep 9786 // getStepRecurrence cheap. 9787 if (!LAR->isAffine() || !MAR->isAffine()) 9788 return None; 9789 9790 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9791 return None; 9792 9793 Less = LAR->getStart(); 9794 More = MAR->getStart(); 9795 9796 // fall through 9797 } 9798 9799 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9800 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9801 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9802 return M - L; 9803 } 9804 9805 SCEV::NoWrapFlags Flags; 9806 const SCEV *LLess = nullptr, *RLess = nullptr; 9807 const SCEV *LMore = nullptr, *RMore = nullptr; 9808 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9809 // Compare (X + C1) vs X. 9810 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9811 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9812 if (RLess == More) 9813 return -(C1->getAPInt()); 9814 9815 // Compare X vs (X + C2). 9816 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9817 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9818 if (RMore == Less) 9819 return C2->getAPInt(); 9820 9821 // Compare (X + C1) vs (X + C2). 9822 if (C1 && C2 && RLess == RMore) 9823 return C2->getAPInt() - C1->getAPInt(); 9824 9825 return None; 9826 } 9827 9828 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9829 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9830 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9831 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9832 return false; 9833 9834 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9835 if (!AddRecLHS) 9836 return false; 9837 9838 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9839 if (!AddRecFoundLHS) 9840 return false; 9841 9842 // We'd like to let SCEV reason about control dependencies, so we constrain 9843 // both the inequalities to be about add recurrences on the same loop. This 9844 // way we can use isLoopEntryGuardedByCond later. 9845 9846 const Loop *L = AddRecFoundLHS->getLoop(); 9847 if (L != AddRecLHS->getLoop()) 9848 return false; 9849 9850 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9851 // 9852 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9853 // ... (2) 9854 // 9855 // Informal proof for (2), assuming (1) [*]: 9856 // 9857 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9858 // 9859 // Then 9860 // 9861 // FoundLHS s< FoundRHS s< INT_MIN - C 9862 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9863 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9864 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9865 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9866 // <=> FoundLHS + C s< FoundRHS + C 9867 // 9868 // [*]: (1) can be proved by ruling out overflow. 9869 // 9870 // [**]: This can be proved by analyzing all the four possibilities: 9871 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9872 // (A s>= 0, B s>= 0). 9873 // 9874 // Note: 9875 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9876 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9877 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9878 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9879 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9880 // C)". 9881 9882 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9883 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9884 if (!LDiff || !RDiff || *LDiff != *RDiff) 9885 return false; 9886 9887 if (LDiff->isMinValue()) 9888 return true; 9889 9890 APInt FoundRHSLimit; 9891 9892 if (Pred == CmpInst::ICMP_ULT) { 9893 FoundRHSLimit = -(*RDiff); 9894 } else { 9895 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9896 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9897 } 9898 9899 // Try to prove (1) or (2), as needed. 9900 return isAvailableAtLoopEntry(FoundRHS, L) && 9901 isLoopEntryGuardedByCond(L, Pred, FoundRHS, 9902 getConstant(FoundRHSLimit)); 9903 } 9904 9905 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, 9906 const SCEV *LHS, const SCEV *RHS, 9907 const SCEV *FoundLHS, 9908 const SCEV *FoundRHS, unsigned Depth) { 9909 const PHINode *LPhi = nullptr, *RPhi = nullptr; 9910 9911 auto ClearOnExit = make_scope_exit([&]() { 9912 if (LPhi) { 9913 bool Erased = PendingMerges.erase(LPhi); 9914 assert(Erased && "Failed to erase LPhi!"); 9915 (void)Erased; 9916 } 9917 if (RPhi) { 9918 bool Erased = PendingMerges.erase(RPhi); 9919 assert(Erased && "Failed to erase RPhi!"); 9920 (void)Erased; 9921 } 9922 }); 9923 9924 // Find respective Phis and check that they are not being pending. 9925 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) 9926 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { 9927 if (!PendingMerges.insert(Phi).second) 9928 return false; 9929 LPhi = Phi; 9930 } 9931 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) 9932 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { 9933 // If we detect a loop of Phi nodes being processed by this method, for 9934 // example: 9935 // 9936 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] 9937 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] 9938 // 9939 // we don't want to deal with a case that complex, so return conservative 9940 // answer false. 9941 if (!PendingMerges.insert(Phi).second) 9942 return false; 9943 RPhi = Phi; 9944 } 9945 9946 // If none of LHS, RHS is a Phi, nothing to do here. 9947 if (!LPhi && !RPhi) 9948 return false; 9949 9950 // If there is a SCEVUnknown Phi we are interested in, make it left. 9951 if (!LPhi) { 9952 std::swap(LHS, RHS); 9953 std::swap(FoundLHS, FoundRHS); 9954 std::swap(LPhi, RPhi); 9955 Pred = ICmpInst::getSwappedPredicate(Pred); 9956 } 9957 9958 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 9959 const BasicBlock *LBB = LPhi->getParent(); 9960 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 9961 9962 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 9963 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 9964 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 9965 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 9966 }; 9967 9968 if (RPhi && RPhi->getParent() == LBB) { 9969 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 9970 // If we compare two Phis from the same block, and for each entry block 9971 // the predicate is true for incoming values from this block, then the 9972 // predicate is also true for the Phis. 9973 for (const BasicBlock *IncBB : predecessors(LBB)) { 9974 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 9975 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 9976 if (!ProvedEasily(L, R)) 9977 return false; 9978 } 9979 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 9980 // Case two: RHS is also a Phi from the same basic block, and it is an 9981 // AddRec. It means that there is a loop which has both AddRec and Unknown 9982 // PHIs, for it we can compare incoming values of AddRec from above the loop 9983 // and latch with their respective incoming values of LPhi. 9984 // TODO: Generalize to handle loops with many inputs in a header. 9985 if (LPhi->getNumIncomingValues() != 2) return false; 9986 9987 auto *RLoop = RAR->getLoop(); 9988 auto *Predecessor = RLoop->getLoopPredecessor(); 9989 assert(Predecessor && "Loop with AddRec with no predecessor?"); 9990 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 9991 if (!ProvedEasily(L1, RAR->getStart())) 9992 return false; 9993 auto *Latch = RLoop->getLoopLatch(); 9994 assert(Latch && "Loop with AddRec with no latch?"); 9995 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 9996 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 9997 return false; 9998 } else { 9999 // In all other cases go over inputs of LHS and compare each of them to RHS, 10000 // the predicate is true for (LHS, RHS) if it is true for all such pairs. 10001 // At this point RHS is either a non-Phi, or it is a Phi from some block 10002 // different from LBB. 10003 for (const BasicBlock *IncBB : predecessors(LBB)) { 10004 // Check that RHS is available in this block. 10005 if (!dominates(RHS, IncBB)) 10006 return false; 10007 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10008 if (!ProvedEasily(L, RHS)) 10009 return false; 10010 } 10011 } 10012 return true; 10013 } 10014 10015 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 10016 const SCEV *LHS, const SCEV *RHS, 10017 const SCEV *FoundLHS, 10018 const SCEV *FoundRHS) { 10019 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10020 return true; 10021 10022 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10023 return true; 10024 10025 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 10026 FoundLHS, FoundRHS) || 10027 // ~x < ~y --> x > y 10028 isImpliedCondOperandsHelper(Pred, LHS, RHS, 10029 getNotSCEV(FoundRHS), 10030 getNotSCEV(FoundLHS)); 10031 } 10032 10033 /// If Expr computes ~A, return A else return nullptr 10034 static const SCEV *MatchNotExpr(const SCEV *Expr) { 10035 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 10036 if (!Add || Add->getNumOperands() != 2 || 10037 !Add->getOperand(0)->isAllOnesValue()) 10038 return nullptr; 10039 10040 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 10041 if (!AddRHS || AddRHS->getNumOperands() != 2 || 10042 !AddRHS->getOperand(0)->isAllOnesValue()) 10043 return nullptr; 10044 10045 return AddRHS->getOperand(1); 10046 } 10047 10048 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values? 10049 template<typename MaxExprType> 10050 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, 10051 const SCEV *Candidate) { 10052 const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); 10053 if (!MaxExpr) return false; 10054 10055 return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); 10056 } 10057 10058 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values? 10059 template<typename MaxExprType> 10060 static bool IsMinConsistingOf(ScalarEvolution &SE, 10061 const SCEV *MaybeMinExpr, 10062 const SCEV *Candidate) { 10063 const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr); 10064 if (!MaybeMaxExpr) 10065 return false; 10066 10067 return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate)); 10068 } 10069 10070 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10071 ICmpInst::Predicate Pred, 10072 const SCEV *LHS, const SCEV *RHS) { 10073 // If both sides are affine addrecs for the same loop, with equal 10074 // steps, and we know the recurrences don't wrap, then we only 10075 // need to check the predicate on the starting values. 10076 10077 if (!ICmpInst::isRelational(Pred)) 10078 return false; 10079 10080 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10081 if (!LAR) 10082 return false; 10083 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10084 if (!RAR) 10085 return false; 10086 if (LAR->getLoop() != RAR->getLoop()) 10087 return false; 10088 if (!LAR->isAffine() || !RAR->isAffine()) 10089 return false; 10090 10091 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10092 return false; 10093 10094 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10095 SCEV::FlagNSW : SCEV::FlagNUW; 10096 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10097 return false; 10098 10099 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10100 } 10101 10102 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10103 /// expression? 10104 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10105 ICmpInst::Predicate Pred, 10106 const SCEV *LHS, const SCEV *RHS) { 10107 switch (Pred) { 10108 default: 10109 return false; 10110 10111 case ICmpInst::ICMP_SGE: 10112 std::swap(LHS, RHS); 10113 LLVM_FALLTHROUGH; 10114 case ICmpInst::ICMP_SLE: 10115 return 10116 // min(A, ...) <= A 10117 IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) || 10118 // A <= max(A, ...) 10119 IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10120 10121 case ICmpInst::ICMP_UGE: 10122 std::swap(LHS, RHS); 10123 LLVM_FALLTHROUGH; 10124 case ICmpInst::ICMP_ULE: 10125 return 10126 // min(A, ...) <= A 10127 IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) || 10128 // A <= max(A, ...) 10129 IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 10130 } 10131 10132 llvm_unreachable("covered switch fell through?!"); 10133 } 10134 10135 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 10136 const SCEV *LHS, const SCEV *RHS, 10137 const SCEV *FoundLHS, 10138 const SCEV *FoundRHS, 10139 unsigned Depth) { 10140 assert(getTypeSizeInBits(LHS->getType()) == 10141 getTypeSizeInBits(RHS->getType()) && 10142 "LHS and RHS have different sizes?"); 10143 assert(getTypeSizeInBits(FoundLHS->getType()) == 10144 getTypeSizeInBits(FoundRHS->getType()) && 10145 "FoundLHS and FoundRHS have different sizes?"); 10146 // We want to avoid hurting the compile time with analysis of too big trees. 10147 if (Depth > MaxSCEVOperationsImplicationDepth) 10148 return false; 10149 // We only want to work with ICMP_SGT comparison so far. 10150 // TODO: Extend to ICMP_UGT? 10151 if (Pred == ICmpInst::ICMP_SLT) { 10152 Pred = ICmpInst::ICMP_SGT; 10153 std::swap(LHS, RHS); 10154 std::swap(FoundLHS, FoundRHS); 10155 } 10156 if (Pred != ICmpInst::ICMP_SGT) 10157 return false; 10158 10159 auto GetOpFromSExt = [&](const SCEV *S) { 10160 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) 10161 return Ext->getOperand(); 10162 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off 10163 // the constant in some cases. 10164 return S; 10165 }; 10166 10167 // Acquire values from extensions. 10168 auto *OrigLHS = LHS; 10169 auto *OrigFoundLHS = FoundLHS; 10170 LHS = GetOpFromSExt(LHS); 10171 FoundLHS = GetOpFromSExt(FoundLHS); 10172 10173 // Is the SGT predicate can be proved trivially or using the found context. 10174 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { 10175 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || 10176 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, 10177 FoundRHS, Depth + 1); 10178 }; 10179 10180 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { 10181 // We want to avoid creation of any new non-constant SCEV. Since we are 10182 // going to compare the operands to RHS, we should be certain that we don't 10183 // need any size extensions for this. So let's decline all cases when the 10184 // sizes of types of LHS and RHS do not match. 10185 // TODO: Maybe try to get RHS from sext to catch more cases? 10186 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) 10187 return false; 10188 10189 // Should not overflow. 10190 if (!LHSAddExpr->hasNoSignedWrap()) 10191 return false; 10192 10193 auto *LL = LHSAddExpr->getOperand(0); 10194 auto *LR = LHSAddExpr->getOperand(1); 10195 auto *MinusOne = getNegativeSCEV(getOne(RHS->getType())); 10196 10197 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 10198 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 10199 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 10200 }; 10201 // Try to prove the following rule: 10202 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 10203 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 10204 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 10205 return true; 10206 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 10207 Value *LL, *LR; 10208 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 10209 10210 using namespace llvm::PatternMatch; 10211 10212 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 10213 // Rules for division. 10214 // We are going to perform some comparisons with Denominator and its 10215 // derivative expressions. In general case, creating a SCEV for it may 10216 // lead to a complex analysis of the entire graph, and in particular it 10217 // can request trip count recalculation for the same loop. This would 10218 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 10219 // this, we only want to create SCEVs that are constants in this section. 10220 // So we bail if Denominator is not a constant. 10221 if (!isa<ConstantInt>(LR)) 10222 return false; 10223 10224 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 10225 10226 // We want to make sure that LHS = FoundLHS / Denominator. If it is so, 10227 // then a SCEV for the numerator already exists and matches with FoundLHS. 10228 auto *Numerator = getExistingSCEV(LL); 10229 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 10230 return false; 10231 10232 // Make sure that the numerator matches with FoundLHS and the denominator 10233 // is positive. 10234 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 10235 return false; 10236 10237 auto *DTy = Denominator->getType(); 10238 auto *FRHSTy = FoundRHS->getType(); 10239 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 10240 // One of types is a pointer and another one is not. We cannot extend 10241 // them properly to a wider type, so let us just reject this case. 10242 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 10243 // to avoid this check. 10244 return false; 10245 10246 // Given that: 10247 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 10248 auto *WTy = getWiderType(DTy, FRHSTy); 10249 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 10250 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 10251 10252 // Try to prove the following rule: 10253 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 10254 // For example, given that FoundLHS > 2. It means that FoundLHS is at 10255 // least 3. If we divide it by Denominator < 4, we will have at least 1. 10256 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 10257 if (isKnownNonPositive(RHS) && 10258 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 10259 return true; 10260 10261 // Try to prove the following rule: 10262 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 10263 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. 10264 // If we divide it by Denominator > 2, then: 10265 // 1. If FoundLHS is negative, then the result is 0. 10266 // 2. If FoundLHS is non-negative, then the result is non-negative. 10267 // Anyways, the result is non-negative. 10268 auto *MinusOne = getNegativeSCEV(getOne(WTy)); 10269 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10270 if (isKnownNegative(RHS) && 10271 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10272 return true; 10273 } 10274 } 10275 10276 // If our expression contained SCEVUnknown Phis, and we split it down and now 10277 // need to prove something for them, try to prove the predicate for every 10278 // possible incoming values of those Phis. 10279 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10280 return true; 10281 10282 return false; 10283 } 10284 10285 bool 10286 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 10287 const SCEV *LHS, const SCEV *RHS) { 10288 return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 10289 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 10290 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 10291 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 10292 } 10293 10294 bool 10295 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 10296 const SCEV *LHS, const SCEV *RHS, 10297 const SCEV *FoundLHS, 10298 const SCEV *FoundRHS) { 10299 switch (Pred) { 10300 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 10301 case ICmpInst::ICMP_EQ: 10302 case ICmpInst::ICMP_NE: 10303 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 10304 return true; 10305 break; 10306 case ICmpInst::ICMP_SLT: 10307 case ICmpInst::ICMP_SLE: 10308 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 10309 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 10310 return true; 10311 break; 10312 case ICmpInst::ICMP_SGT: 10313 case ICmpInst::ICMP_SGE: 10314 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 10315 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 10316 return true; 10317 break; 10318 case ICmpInst::ICMP_ULT: 10319 case ICmpInst::ICMP_ULE: 10320 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 10321 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 10322 return true; 10323 break; 10324 case ICmpInst::ICMP_UGT: 10325 case ICmpInst::ICMP_UGE: 10326 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 10327 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 10328 return true; 10329 break; 10330 } 10331 10332 // Maybe it can be proved via operations? 10333 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10334 return true; 10335 10336 return false; 10337 } 10338 10339 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, 10340 const SCEV *LHS, 10341 const SCEV *RHS, 10342 const SCEV *FoundLHS, 10343 const SCEV *FoundRHS) { 10344 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS)) 10345 // The restriction on `FoundRHS` be lifted easily -- it exists only to 10346 // reduce the compile time impact of this optimization. 10347 return false; 10348 10349 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS); 10350 if (!Addend) 10351 return false; 10352 10353 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt(); 10354 10355 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the 10356 // antecedent "`FoundLHS` `Pred` `FoundRHS`". 10357 ConstantRange FoundLHSRange = 10358 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 10359 10360 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 10361 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 10362 10363 // We can also compute the range of values for `LHS` that satisfy the 10364 // consequent, "`LHS` `Pred` `RHS`": 10365 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 10366 ConstantRange SatisfyingLHSRange = 10367 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 10368 10369 // The antecedent implies the consequent if every value of `LHS` that 10370 // satisfies the antecedent also satisfies the consequent. 10371 return SatisfyingLHSRange.contains(LHSRange); 10372 } 10373 10374 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 10375 bool IsSigned, bool NoWrap) { 10376 assert(isKnownPositive(Stride) && "Positive stride expected!"); 10377 10378 if (NoWrap) return false; 10379 10380 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10381 const SCEV *One = getOne(Stride->getType()); 10382 10383 if (IsSigned) { 10384 APInt MaxRHS = getSignedRangeMax(RHS); 10385 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 10386 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10387 10388 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 10389 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 10390 } 10391 10392 APInt MaxRHS = getUnsignedRangeMax(RHS); 10393 APInt MaxValue = APInt::getMaxValue(BitWidth); 10394 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10395 10396 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 10397 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10398 } 10399 10400 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10401 bool IsSigned, bool NoWrap) { 10402 if (NoWrap) return false; 10403 10404 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10405 const SCEV *One = getOne(Stride->getType()); 10406 10407 if (IsSigned) { 10408 APInt MinRHS = getSignedRangeMin(RHS); 10409 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10410 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10411 10412 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10413 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10414 } 10415 10416 APInt MinRHS = getUnsignedRangeMin(RHS); 10417 APInt MinValue = APInt::getMinValue(BitWidth); 10418 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10419 10420 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10421 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10422 } 10423 10424 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10425 bool Equality) { 10426 const SCEV *One = getOne(Step->getType()); 10427 Delta = Equality ? getAddExpr(Delta, Step) 10428 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10429 return getUDivExpr(Delta, Step); 10430 } 10431 10432 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10433 const SCEV *Stride, 10434 const SCEV *End, 10435 unsigned BitWidth, 10436 bool IsSigned) { 10437 10438 assert(!isKnownNonPositive(Stride) && 10439 "Stride is expected strictly positive!"); 10440 // Calculate the maximum backedge count based on the range of values 10441 // permitted by Start, End, and Stride. 10442 const SCEV *MaxBECount; 10443 APInt MinStart = 10444 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10445 10446 APInt StrideForMaxBECount = 10447 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10448 10449 // We already know that the stride is positive, so we paper over conservatism 10450 // in our range computation by forcing StrideForMaxBECount to be at least one. 10451 // In theory this is unnecessary, but we expect MaxBECount to be a 10452 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10453 // is nothing to constant fold it to). 10454 APInt One(BitWidth, 1, IsSigned); 10455 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10456 10457 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10458 : APInt::getMaxValue(BitWidth); 10459 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10460 10461 // Although End can be a MAX expression we estimate MaxEnd considering only 10462 // the case End = RHS of the loop termination condition. This is safe because 10463 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10464 // taken count. 10465 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 10466 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10467 10468 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10469 getConstant(StrideForMaxBECount) /* Step */, 10470 false /* Equality */); 10471 10472 return MaxBECount; 10473 } 10474 10475 ScalarEvolution::ExitLimit 10476 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10477 const Loop *L, bool IsSigned, 10478 bool ControlsExit, bool AllowPredicates) { 10479 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10480 10481 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10482 bool PredicatedIV = false; 10483 10484 if (!IV && AllowPredicates) { 10485 // Try to make this an AddRec using runtime tests, in the first X 10486 // iterations of this loop, where X is the SCEV expression found by the 10487 // algorithm below. 10488 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10489 PredicatedIV = true; 10490 } 10491 10492 // Avoid weird loops 10493 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10494 return getCouldNotCompute(); 10495 10496 bool NoWrap = ControlsExit && 10497 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10498 10499 const SCEV *Stride = IV->getStepRecurrence(*this); 10500 10501 bool PositiveStride = isKnownPositive(Stride); 10502 10503 // Avoid negative or zero stride values. 10504 if (!PositiveStride) { 10505 // We can compute the correct backedge taken count for loops with unknown 10506 // strides if we can prove that the loop is not an infinite loop with side 10507 // effects. Here's the loop structure we are trying to handle - 10508 // 10509 // i = start 10510 // do { 10511 // A[i] = i; 10512 // i += s; 10513 // } while (i < end); 10514 // 10515 // The backedge taken count for such loops is evaluated as - 10516 // (max(end, start + stride) - start - 1) /u stride 10517 // 10518 // The additional preconditions that we need to check to prove correctness 10519 // of the above formula is as follows - 10520 // 10521 // a) IV is either nuw or nsw depending upon signedness (indicated by the 10522 // NoWrap flag). 10523 // b) loop is single exit with no side effects. 10524 // 10525 // 10526 // Precondition a) implies that if the stride is negative, this is a single 10527 // trip loop. The backedge taken count formula reduces to zero in this case. 10528 // 10529 // Precondition b) implies that the unknown stride cannot be zero otherwise 10530 // we have UB. 10531 // 10532 // The positive stride case is the same as isKnownPositive(Stride) returning 10533 // true (original behavior of the function). 10534 // 10535 // We want to make sure that the stride is truly unknown as there are edge 10536 // cases where ScalarEvolution propagates no wrap flags to the 10537 // post-increment/decrement IV even though the increment/decrement operation 10538 // itself is wrapping. The computed backedge taken count may be wrong in 10539 // such cases. This is prevented by checking that the stride is not known to 10540 // be either positive or non-positive. For example, no wrap flags are 10541 // propagated to the post-increment IV of this loop with a trip count of 2 - 10542 // 10543 // unsigned char i; 10544 // for(i=127; i<128; i+=129) 10545 // A[i] = i; 10546 // 10547 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10548 !loopHasNoSideEffects(L)) 10549 return getCouldNotCompute(); 10550 } else if (!Stride->isOne() && 10551 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10552 // Avoid proven overflow cases: this will ensure that the backedge taken 10553 // count will not generate any unsigned overflow. Relaxed no-overflow 10554 // conditions exploit NoWrapFlags, allowing to optimize in presence of 10555 // undefined behaviors like the case of C language. 10556 return getCouldNotCompute(); 10557 10558 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10559 : ICmpInst::ICMP_ULT; 10560 const SCEV *Start = IV->getStart(); 10561 const SCEV *End = RHS; 10562 // When the RHS is not invariant, we do not know the end bound of the loop and 10563 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10564 // calculate the MaxBECount, given the start, stride and max value for the end 10565 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10566 // checked above). 10567 if (!isLoopInvariant(RHS, L)) { 10568 const SCEV *MaxBECount = computeMaxBECountForLT( 10569 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10570 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10571 false /*MaxOrZero*/, Predicates); 10572 } 10573 // If the backedge is taken at least once, then it will be taken 10574 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10575 // is the LHS value of the less-than comparison the first time it is evaluated 10576 // and End is the RHS. 10577 const SCEV *BECountIfBackedgeTaken = 10578 computeBECount(getMinusSCEV(End, Start), Stride, false); 10579 // If the loop entry is guarded by the result of the backedge test of the 10580 // first loop iteration, then we know the backedge will be taken at least 10581 // once and so the backedge taken count is as above. If not then we use the 10582 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10583 // as if the backedge is taken at least once max(End,Start) is End and so the 10584 // result is as above, and if not max(End,Start) is Start so we get a backedge 10585 // count of zero. 10586 const SCEV *BECount; 10587 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10588 BECount = BECountIfBackedgeTaken; 10589 else { 10590 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10591 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10592 } 10593 10594 const SCEV *MaxBECount; 10595 bool MaxOrZero = false; 10596 if (isa<SCEVConstant>(BECount)) 10597 MaxBECount = BECount; 10598 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10599 // If we know exactly how many times the backedge will be taken if it's 10600 // taken at least once, then the backedge count will either be that or 10601 // zero. 10602 MaxBECount = BECountIfBackedgeTaken; 10603 MaxOrZero = true; 10604 } else { 10605 MaxBECount = computeMaxBECountForLT( 10606 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10607 } 10608 10609 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10610 !isa<SCEVCouldNotCompute>(BECount)) 10611 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10612 10613 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10614 } 10615 10616 ScalarEvolution::ExitLimit 10617 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10618 const Loop *L, bool IsSigned, 10619 bool ControlsExit, bool AllowPredicates) { 10620 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10621 // We handle only IV > Invariant 10622 if (!isLoopInvariant(RHS, L)) 10623 return getCouldNotCompute(); 10624 10625 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10626 if (!IV && AllowPredicates) 10627 // Try to make this an AddRec using runtime tests, in the first X 10628 // iterations of this loop, where X is the SCEV expression found by the 10629 // algorithm below. 10630 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10631 10632 // Avoid weird loops 10633 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10634 return getCouldNotCompute(); 10635 10636 bool NoWrap = ControlsExit && 10637 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10638 10639 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 10640 10641 // Avoid negative or zero stride values 10642 if (!isKnownPositive(Stride)) 10643 return getCouldNotCompute(); 10644 10645 // Avoid proven overflow cases: this will ensure that the backedge taken count 10646 // will not generate any unsigned overflow. Relaxed no-overflow conditions 10647 // exploit NoWrapFlags, allowing to optimize in presence of undefined 10648 // behaviors like the case of C language. 10649 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 10650 return getCouldNotCompute(); 10651 10652 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 10653 : ICmpInst::ICMP_UGT; 10654 10655 const SCEV *Start = IV->getStart(); 10656 const SCEV *End = RHS; 10657 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) 10658 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 10659 10660 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 10661 10662 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 10663 : getUnsignedRangeMax(Start); 10664 10665 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 10666 : getUnsignedRangeMin(Stride); 10667 10668 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 10669 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 10670 : APInt::getMinValue(BitWidth) + (MinStride - 1); 10671 10672 // Although End can be a MIN expression we estimate MinEnd considering only 10673 // the case End = RHS. This is safe because in the other case (Start - End) 10674 // is zero, leading to a zero maximum backedge taken count. 10675 APInt MinEnd = 10676 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10677 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10678 10679 10680 const SCEV *MaxBECount = getCouldNotCompute(); 10681 if (isa<SCEVConstant>(BECount)) 10682 MaxBECount = BECount; 10683 else 10684 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 10685 getConstant(MinStride), false); 10686 10687 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10688 MaxBECount = BECount; 10689 10690 return ExitLimit(BECount, MaxBECount, false, Predicates); 10691 } 10692 10693 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10694 ScalarEvolution &SE) const { 10695 if (Range.isFullSet()) // Infinite loop. 10696 return SE.getCouldNotCompute(); 10697 10698 // If the start is a non-zero constant, shift the range to simplify things. 10699 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10700 if (!SC->getValue()->isZero()) { 10701 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10702 Operands[0] = SE.getZero(SC->getType()); 10703 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10704 getNoWrapFlags(FlagNW)); 10705 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10706 return ShiftedAddRec->getNumIterationsInRange( 10707 Range.subtract(SC->getAPInt()), SE); 10708 // This is strange and shouldn't happen. 10709 return SE.getCouldNotCompute(); 10710 } 10711 10712 // The only time we can solve this is when we have all constant indices. 10713 // Otherwise, we cannot determine the overflow conditions. 10714 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10715 return SE.getCouldNotCompute(); 10716 10717 // Okay at this point we know that all elements of the chrec are constants and 10718 // that the start element is zero. 10719 10720 // First check to see if the range contains zero. If not, the first 10721 // iteration exits. 10722 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10723 if (!Range.contains(APInt(BitWidth, 0))) 10724 return SE.getZero(getType()); 10725 10726 if (isAffine()) { 10727 // If this is an affine expression then we have this situation: 10728 // Solve {0,+,A} in Range === Ax in Range 10729 10730 // We know that zero is in the range. If A is positive then we know that 10731 // the upper value of the range must be the first possible exit value. 10732 // If A is negative then the lower of the range is the last possible loop 10733 // value. Also note that we already checked for a full range. 10734 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10735 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10736 10737 // The exit value should be (End+A)/A. 10738 APInt ExitVal = (End + A).udiv(A); 10739 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10740 10741 // Evaluate at the exit value. If we really did fall out of the valid 10742 // range, then we computed our trip count, otherwise wrap around or other 10743 // things must have happened. 10744 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10745 if (Range.contains(Val->getValue())) 10746 return SE.getCouldNotCompute(); // Something strange happened 10747 10748 // Ensure that the previous value is in the range. This is a sanity check. 10749 assert(Range.contains( 10750 EvaluateConstantChrecAtConstant(this, 10751 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10752 "Linear scev computation is off in a bad way!"); 10753 return SE.getConstant(ExitValue); 10754 } 10755 10756 if (isQuadratic()) { 10757 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 10758 return SE.getConstant(S.getValue()); 10759 } 10760 10761 return SE.getCouldNotCompute(); 10762 } 10763 10764 const SCEVAddRecExpr * 10765 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 10766 assert(getNumOperands() > 1 && "AddRec with zero step?"); 10767 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 10768 // but in this case we cannot guarantee that the value returned will be an 10769 // AddRec because SCEV does not have a fixed point where it stops 10770 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 10771 // may happen if we reach arithmetic depth limit while simplifying. So we 10772 // construct the returned value explicitly. 10773 SmallVector<const SCEV *, 3> Ops; 10774 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 10775 // (this + Step) is {A+B,+,B+C,+...,+,N}. 10776 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 10777 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 10778 // We know that the last operand is not a constant zero (otherwise it would 10779 // have been popped out earlier). This guarantees us that if the result has 10780 // the same last operand, then it will also not be popped out, meaning that 10781 // the returned value will be an AddRec. 10782 const SCEV *Last = getOperand(getNumOperands() - 1); 10783 assert(!Last->isZero() && "Recurrency with zero step?"); 10784 Ops.push_back(Last); 10785 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 10786 SCEV::FlagAnyWrap)); 10787 } 10788 10789 // Return true when S contains at least an undef value. 10790 static inline bool containsUndefs(const SCEV *S) { 10791 return SCEVExprContains(S, [](const SCEV *S) { 10792 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 10793 return isa<UndefValue>(SU->getValue()); 10794 else if (const auto *SC = dyn_cast<SCEVConstant>(S)) 10795 return isa<UndefValue>(SC->getValue()); 10796 return false; 10797 }); 10798 } 10799 10800 namespace { 10801 10802 // Collect all steps of SCEV expressions. 10803 struct SCEVCollectStrides { 10804 ScalarEvolution &SE; 10805 SmallVectorImpl<const SCEV *> &Strides; 10806 10807 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 10808 : SE(SE), Strides(S) {} 10809 10810 bool follow(const SCEV *S) { 10811 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 10812 Strides.push_back(AR->getStepRecurrence(SE)); 10813 return true; 10814 } 10815 10816 bool isDone() const { return false; } 10817 }; 10818 10819 // Collect all SCEVUnknown and SCEVMulExpr expressions. 10820 struct SCEVCollectTerms { 10821 SmallVectorImpl<const SCEV *> &Terms; 10822 10823 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 10824 10825 bool follow(const SCEV *S) { 10826 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 10827 isa<SCEVSignExtendExpr>(S)) { 10828 if (!containsUndefs(S)) 10829 Terms.push_back(S); 10830 10831 // Stop recursion: once we collected a term, do not walk its operands. 10832 return false; 10833 } 10834 10835 // Keep looking. 10836 return true; 10837 } 10838 10839 bool isDone() const { return false; } 10840 }; 10841 10842 // Check if a SCEV contains an AddRecExpr. 10843 struct SCEVHasAddRec { 10844 bool &ContainsAddRec; 10845 10846 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 10847 ContainsAddRec = false; 10848 } 10849 10850 bool follow(const SCEV *S) { 10851 if (isa<SCEVAddRecExpr>(S)) { 10852 ContainsAddRec = true; 10853 10854 // Stop recursion: once we collected a term, do not walk its operands. 10855 return false; 10856 } 10857 10858 // Keep looking. 10859 return true; 10860 } 10861 10862 bool isDone() const { return false; } 10863 }; 10864 10865 // Find factors that are multiplied with an expression that (possibly as a 10866 // subexpression) contains an AddRecExpr. In the expression: 10867 // 10868 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 10869 // 10870 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 10871 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 10872 // parameters as they form a product with an induction variable. 10873 // 10874 // This collector expects all array size parameters to be in the same MulExpr. 10875 // It might be necessary to later add support for collecting parameters that are 10876 // spread over different nested MulExpr. 10877 struct SCEVCollectAddRecMultiplies { 10878 SmallVectorImpl<const SCEV *> &Terms; 10879 ScalarEvolution &SE; 10880 10881 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) 10882 : Terms(T), SE(SE) {} 10883 10884 bool follow(const SCEV *S) { 10885 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { 10886 bool HasAddRec = false; 10887 SmallVector<const SCEV *, 0> Operands; 10888 for (auto Op : Mul->operands()) { 10889 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op); 10890 if (Unknown && !isa<CallInst>(Unknown->getValue())) { 10891 Operands.push_back(Op); 10892 } else if (Unknown) { 10893 HasAddRec = true; 10894 } else { 10895 bool ContainsAddRec; 10896 SCEVHasAddRec ContiansAddRec(ContainsAddRec); 10897 visitAll(Op, ContiansAddRec); 10898 HasAddRec |= ContainsAddRec; 10899 } 10900 } 10901 if (Operands.size() == 0) 10902 return true; 10903 10904 if (!HasAddRec) 10905 return false; 10906 10907 Terms.push_back(SE.getMulExpr(Operands)); 10908 // Stop recursion: once we collected a term, do not walk its operands. 10909 return false; 10910 } 10911 10912 // Keep looking. 10913 return true; 10914 } 10915 10916 bool isDone() const { return false; } 10917 }; 10918 10919 } // end anonymous namespace 10920 10921 /// Find parametric terms in this SCEVAddRecExpr. We first for parameters in 10922 /// two places: 10923 /// 1) The strides of AddRec expressions. 10924 /// 2) Unknowns that are multiplied with AddRec expressions. 10925 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10926 SmallVectorImpl<const SCEV *> &Terms) { 10927 SmallVector<const SCEV *, 4> Strides; 10928 SCEVCollectStrides StrideCollector(*this, Strides); 10929 visitAll(Expr, StrideCollector); 10930 10931 LLVM_DEBUG({ 10932 dbgs() << "Strides:\n"; 10933 for (const SCEV *S : Strides) 10934 dbgs() << *S << "\n"; 10935 }); 10936 10937 for (const SCEV *S : Strides) { 10938 SCEVCollectTerms TermCollector(Terms); 10939 visitAll(S, TermCollector); 10940 } 10941 10942 LLVM_DEBUG({ 10943 dbgs() << "Terms:\n"; 10944 for (const SCEV *T : Terms) 10945 dbgs() << *T << "\n"; 10946 }); 10947 10948 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 10949 visitAll(Expr, MulCollector); 10950 } 10951 10952 static bool findArrayDimensionsRec(ScalarEvolution &SE, 10953 SmallVectorImpl<const SCEV *> &Terms, 10954 SmallVectorImpl<const SCEV *> &Sizes) { 10955 int Last = Terms.size() - 1; 10956 const SCEV *Step = Terms[Last]; 10957 10958 // End of recursion. 10959 if (Last == 0) { 10960 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 10961 SmallVector<const SCEV *, 2> Qs; 10962 for (const SCEV *Op : M->operands()) 10963 if (!isa<SCEVConstant>(Op)) 10964 Qs.push_back(Op); 10965 10966 Step = SE.getMulExpr(Qs); 10967 } 10968 10969 Sizes.push_back(Step); 10970 return true; 10971 } 10972 10973 for (const SCEV *&Term : Terms) { 10974 // Normalize the terms before the next call to findArrayDimensionsRec. 10975 const SCEV *Q, *R; 10976 SCEVDivision::divide(SE, Term, Step, &Q, &R); 10977 10978 // Bail out when GCD does not evenly divide one of the terms. 10979 if (!R->isZero()) 10980 return false; 10981 10982 Term = Q; 10983 } 10984 10985 // Remove all SCEVConstants. 10986 Terms.erase( 10987 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 10988 Terms.end()); 10989 10990 if (Terms.size() > 0) 10991 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 10992 return false; 10993 10994 Sizes.push_back(Step); 10995 return true; 10996 } 10997 10998 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 10999 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11000 for (const SCEV *T : Terms) 11001 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 11002 return true; 11003 return false; 11004 } 11005 11006 // Return the number of product terms in S. 11007 static inline int numberOfTerms(const SCEV *S) { 11008 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11009 return Expr->getNumOperands(); 11010 return 1; 11011 } 11012 11013 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11014 if (isa<SCEVConstant>(T)) 11015 return nullptr; 11016 11017 if (isa<SCEVUnknown>(T)) 11018 return T; 11019 11020 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11021 SmallVector<const SCEV *, 2> Factors; 11022 for (const SCEV *Op : M->operands()) 11023 if (!isa<SCEVConstant>(Op)) 11024 Factors.push_back(Op); 11025 11026 return SE.getMulExpr(Factors); 11027 } 11028 11029 return T; 11030 } 11031 11032 /// Return the size of an element read or written by Inst. 11033 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11034 Type *Ty; 11035 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11036 Ty = Store->getValueOperand()->getType(); 11037 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11038 Ty = Load->getType(); 11039 else 11040 return nullptr; 11041 11042 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11043 return getSizeOfExpr(ETy, Ty); 11044 } 11045 11046 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11047 SmallVectorImpl<const SCEV *> &Sizes, 11048 const SCEV *ElementSize) { 11049 if (Terms.size() < 1 || !ElementSize) 11050 return; 11051 11052 // Early return when Terms do not contain parameters: we do not delinearize 11053 // non parametric SCEVs. 11054 if (!containsParameters(Terms)) 11055 return; 11056 11057 LLVM_DEBUG({ 11058 dbgs() << "Terms:\n"; 11059 for (const SCEV *T : Terms) 11060 dbgs() << *T << "\n"; 11061 }); 11062 11063 // Remove duplicates. 11064 array_pod_sort(Terms.begin(), Terms.end()); 11065 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11066 11067 // Put larger terms first. 11068 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11069 return numberOfTerms(LHS) > numberOfTerms(RHS); 11070 }); 11071 11072 // Try to divide all terms by the element size. If term is not divisible by 11073 // element size, proceed with the original term. 11074 for (const SCEV *&Term : Terms) { 11075 const SCEV *Q, *R; 11076 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11077 if (!Q->isZero()) 11078 Term = Q; 11079 } 11080 11081 SmallVector<const SCEV *, 4> NewTerms; 11082 11083 // Remove constant factors. 11084 for (const SCEV *T : Terms) 11085 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11086 NewTerms.push_back(NewT); 11087 11088 LLVM_DEBUG({ 11089 dbgs() << "Terms after sorting:\n"; 11090 for (const SCEV *T : NewTerms) 11091 dbgs() << *T << "\n"; 11092 }); 11093 11094 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11095 Sizes.clear(); 11096 return; 11097 } 11098 11099 // The last element to be pushed into Sizes is the size of an element. 11100 Sizes.push_back(ElementSize); 11101 11102 LLVM_DEBUG({ 11103 dbgs() << "Sizes:\n"; 11104 for (const SCEV *S : Sizes) 11105 dbgs() << *S << "\n"; 11106 }); 11107 } 11108 11109 void ScalarEvolution::computeAccessFunctions( 11110 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11111 SmallVectorImpl<const SCEV *> &Sizes) { 11112 // Early exit in case this SCEV is not an affine multivariate function. 11113 if (Sizes.empty()) 11114 return; 11115 11116 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11117 if (!AR->isAffine()) 11118 return; 11119 11120 const SCEV *Res = Expr; 11121 int Last = Sizes.size() - 1; 11122 for (int i = Last; i >= 0; i--) { 11123 const SCEV *Q, *R; 11124 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11125 11126 LLVM_DEBUG({ 11127 dbgs() << "Res: " << *Res << "\n"; 11128 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11129 dbgs() << "Res divided by Sizes[i]:\n"; 11130 dbgs() << "Quotient: " << *Q << "\n"; 11131 dbgs() << "Remainder: " << *R << "\n"; 11132 }); 11133 11134 Res = Q; 11135 11136 // Do not record the last subscript corresponding to the size of elements in 11137 // the array. 11138 if (i == Last) { 11139 11140 // Bail out if the remainder is too complex. 11141 if (isa<SCEVAddRecExpr>(R)) { 11142 Subscripts.clear(); 11143 Sizes.clear(); 11144 return; 11145 } 11146 11147 continue; 11148 } 11149 11150 // Record the access function for the current subscript. 11151 Subscripts.push_back(R); 11152 } 11153 11154 // Also push in last position the remainder of the last division: it will be 11155 // the access function of the innermost dimension. 11156 Subscripts.push_back(Res); 11157 11158 std::reverse(Subscripts.begin(), Subscripts.end()); 11159 11160 LLVM_DEBUG({ 11161 dbgs() << "Subscripts:\n"; 11162 for (const SCEV *S : Subscripts) 11163 dbgs() << *S << "\n"; 11164 }); 11165 } 11166 11167 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11168 /// sizes of an array access. Returns the remainder of the delinearization that 11169 /// is the offset start of the array. The SCEV->delinearize algorithm computes 11170 /// the multiples of SCEV coefficients: that is a pattern matching of sub 11171 /// expressions in the stride and base of a SCEV corresponding to the 11172 /// computation of a GCD (greatest common divisor) of base and stride. When 11173 /// SCEV->delinearize fails, it returns the SCEV unchanged. 11174 /// 11175 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11176 /// 11177 /// void foo(long n, long m, long o, double A[n][m][o]) { 11178 /// 11179 /// for (long i = 0; i < n; i++) 11180 /// for (long j = 0; j < m; j++) 11181 /// for (long k = 0; k < o; k++) 11182 /// A[i][j][k] = 1.0; 11183 /// } 11184 /// 11185 /// the delinearization input is the following AddRec SCEV: 11186 /// 11187 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11188 /// 11189 /// From this SCEV, we are able to say that the base offset of the access is %A 11190 /// because it appears as an offset that does not divide any of the strides in 11191 /// the loops: 11192 /// 11193 /// CHECK: Base offset: %A 11194 /// 11195 /// and then SCEV->delinearize determines the size of some of the dimensions of 11196 /// the array as these are the multiples by which the strides are happening: 11197 /// 11198 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11199 /// 11200 /// Note that the outermost dimension remains of UnknownSize because there are 11201 /// no strides that would help identifying the size of the last dimension: when 11202 /// the array has been statically allocated, one could compute the size of that 11203 /// dimension by dividing the overall size of the array by the size of the known 11204 /// dimensions: %m * %o * 8. 11205 /// 11206 /// Finally delinearize provides the access functions for the array reference 11207 /// that does correspond to A[i][j][k] of the above C testcase: 11208 /// 11209 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11210 /// 11211 /// The testcases are checking the output of a function pass: 11212 /// DelinearizationPass that walks through all loads and stores of a function 11213 /// asking for the SCEV of the memory access with respect to all enclosing 11214 /// loops, calling SCEV->delinearize on that and printing the results. 11215 void ScalarEvolution::delinearize(const SCEV *Expr, 11216 SmallVectorImpl<const SCEV *> &Subscripts, 11217 SmallVectorImpl<const SCEV *> &Sizes, 11218 const SCEV *ElementSize) { 11219 // First step: collect parametric terms. 11220 SmallVector<const SCEV *, 4> Terms; 11221 collectParametricTerms(Expr, Terms); 11222 11223 if (Terms.empty()) 11224 return; 11225 11226 // Second step: find subscript sizes. 11227 findArrayDimensions(Terms, Sizes, ElementSize); 11228 11229 if (Sizes.empty()) 11230 return; 11231 11232 // Third step: compute the access functions for each subscript. 11233 computeAccessFunctions(Expr, Subscripts, Sizes); 11234 11235 if (Subscripts.empty()) 11236 return; 11237 11238 LLVM_DEBUG({ 11239 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11240 dbgs() << "ArrayDecl[UnknownSize]"; 11241 for (const SCEV *S : Sizes) 11242 dbgs() << "[" << *S << "]"; 11243 11244 dbgs() << "\nArrayRef"; 11245 for (const SCEV *S : Subscripts) 11246 dbgs() << "[" << *S << "]"; 11247 dbgs() << "\n"; 11248 }); 11249 } 11250 11251 //===----------------------------------------------------------------------===// 11252 // SCEVCallbackVH Class Implementation 11253 //===----------------------------------------------------------------------===// 11254 11255 void ScalarEvolution::SCEVCallbackVH::deleted() { 11256 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11257 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11258 SE->ConstantEvolutionLoopExitValue.erase(PN); 11259 SE->eraseValueFromMap(getValPtr()); 11260 // this now dangles! 11261 } 11262 11263 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11264 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11265 11266 // Forget all the expressions associated with users of the old value, 11267 // so that future queries will recompute the expressions using the new 11268 // value. 11269 Value *Old = getValPtr(); 11270 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11271 SmallPtrSet<User *, 8> Visited; 11272 while (!Worklist.empty()) { 11273 User *U = Worklist.pop_back_val(); 11274 // Deleting the Old value will cause this to dangle. Postpone 11275 // that until everything else is done. 11276 if (U == Old) 11277 continue; 11278 if (!Visited.insert(U).second) 11279 continue; 11280 if (PHINode *PN = dyn_cast<PHINode>(U)) 11281 SE->ConstantEvolutionLoopExitValue.erase(PN); 11282 SE->eraseValueFromMap(U); 11283 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11284 } 11285 // Delete the Old value. 11286 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11287 SE->ConstantEvolutionLoopExitValue.erase(PN); 11288 SE->eraseValueFromMap(Old); 11289 // this now dangles! 11290 } 11291 11292 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11293 : CallbackVH(V), SE(se) {} 11294 11295 //===----------------------------------------------------------------------===// 11296 // ScalarEvolution Class Implementation 11297 //===----------------------------------------------------------------------===// 11298 11299 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11300 AssumptionCache &AC, DominatorTree &DT, 11301 LoopInfo &LI) 11302 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11303 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11304 LoopDispositions(64), BlockDispositions(64) { 11305 // To use guards for proving predicates, we need to scan every instruction in 11306 // relevant basic blocks, and not just terminators. Doing this is a waste of 11307 // time if the IR does not actually contain any calls to 11308 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11309 // 11310 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11311 // to _add_ guards to the module when there weren't any before, and wants 11312 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11313 // efficient in lieu of being smart in that rather obscure case. 11314 11315 auto *GuardDecl = F.getParent()->getFunction( 11316 Intrinsic::getName(Intrinsic::experimental_guard)); 11317 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11318 } 11319 11320 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11321 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11322 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11323 ValueExprMap(std::move(Arg.ValueExprMap)), 11324 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11325 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11326 PendingMerges(std::move(Arg.PendingMerges)), 11327 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11328 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11329 PredicatedBackedgeTakenCounts( 11330 std::move(Arg.PredicatedBackedgeTakenCounts)), 11331 ConstantEvolutionLoopExitValue( 11332 std::move(Arg.ConstantEvolutionLoopExitValue)), 11333 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11334 LoopDispositions(std::move(Arg.LoopDispositions)), 11335 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11336 BlockDispositions(std::move(Arg.BlockDispositions)), 11337 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11338 SignedRanges(std::move(Arg.SignedRanges)), 11339 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11340 UniquePreds(std::move(Arg.UniquePreds)), 11341 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11342 LoopUsers(std::move(Arg.LoopUsers)), 11343 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11344 FirstUnknown(Arg.FirstUnknown) { 11345 Arg.FirstUnknown = nullptr; 11346 } 11347 11348 ScalarEvolution::~ScalarEvolution() { 11349 // Iterate through all the SCEVUnknown instances and call their 11350 // destructors, so that they release their references to their values. 11351 for (SCEVUnknown *U = FirstUnknown; U;) { 11352 SCEVUnknown *Tmp = U; 11353 U = U->Next; 11354 Tmp->~SCEVUnknown(); 11355 } 11356 FirstUnknown = nullptr; 11357 11358 ExprValueMap.clear(); 11359 ValueExprMap.clear(); 11360 HasRecMap.clear(); 11361 11362 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11363 // that a loop had multiple computable exits. 11364 for (auto &BTCI : BackedgeTakenCounts) 11365 BTCI.second.clear(); 11366 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11367 BTCI.second.clear(); 11368 11369 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11370 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11371 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11372 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11373 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11374 } 11375 11376 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11377 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11378 } 11379 11380 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11381 const Loop *L) { 11382 // Print all inner loops first 11383 for (Loop *I : *L) 11384 PrintLoopInfo(OS, SE, I); 11385 11386 OS << "Loop "; 11387 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11388 OS << ": "; 11389 11390 SmallVector<BasicBlock *, 8> ExitBlocks; 11391 L->getExitBlocks(ExitBlocks); 11392 if (ExitBlocks.size() != 1) 11393 OS << "<multiple exits> "; 11394 11395 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11396 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 11397 } else { 11398 OS << "Unpredictable backedge-taken count. "; 11399 } 11400 11401 OS << "\n" 11402 "Loop "; 11403 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11404 OS << ": "; 11405 11406 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 11407 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 11408 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11409 OS << ", actual taken count either this or zero."; 11410 } else { 11411 OS << "Unpredictable max backedge-taken count. "; 11412 } 11413 11414 OS << "\n" 11415 "Loop "; 11416 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11417 OS << ": "; 11418 11419 SCEVUnionPredicate Pred; 11420 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11421 if (!isa<SCEVCouldNotCompute>(PBT)) { 11422 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11423 OS << " Predicates:\n"; 11424 Pred.print(OS, 4); 11425 } else { 11426 OS << "Unpredictable predicated backedge-taken count. "; 11427 } 11428 OS << "\n"; 11429 11430 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11431 OS << "Loop "; 11432 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11433 OS << ": "; 11434 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11435 } 11436 } 11437 11438 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11439 switch (LD) { 11440 case ScalarEvolution::LoopVariant: 11441 return "Variant"; 11442 case ScalarEvolution::LoopInvariant: 11443 return "Invariant"; 11444 case ScalarEvolution::LoopComputable: 11445 return "Computable"; 11446 } 11447 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11448 } 11449 11450 void ScalarEvolution::print(raw_ostream &OS) const { 11451 // ScalarEvolution's implementation of the print method is to print 11452 // out SCEV values of all instructions that are interesting. Doing 11453 // this potentially causes it to create new SCEV objects though, 11454 // which technically conflicts with the const qualifier. This isn't 11455 // observable from outside the class though, so casting away the 11456 // const isn't dangerous. 11457 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11458 11459 OS << "Classifying expressions for: "; 11460 F.printAsOperand(OS, /*PrintType=*/false); 11461 OS << "\n"; 11462 for (Instruction &I : instructions(F)) 11463 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11464 OS << I << '\n'; 11465 OS << " --> "; 11466 const SCEV *SV = SE.getSCEV(&I); 11467 SV->print(OS); 11468 if (!isa<SCEVCouldNotCompute>(SV)) { 11469 OS << " U: "; 11470 SE.getUnsignedRange(SV).print(OS); 11471 OS << " S: "; 11472 SE.getSignedRange(SV).print(OS); 11473 } 11474 11475 const Loop *L = LI.getLoopFor(I.getParent()); 11476 11477 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11478 if (AtUse != SV) { 11479 OS << " --> "; 11480 AtUse->print(OS); 11481 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11482 OS << " U: "; 11483 SE.getUnsignedRange(AtUse).print(OS); 11484 OS << " S: "; 11485 SE.getSignedRange(AtUse).print(OS); 11486 } 11487 } 11488 11489 if (L) { 11490 OS << "\t\t" "Exits: "; 11491 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11492 if (!SE.isLoopInvariant(ExitValue, L)) { 11493 OS << "<<Unknown>>"; 11494 } else { 11495 OS << *ExitValue; 11496 } 11497 11498 bool First = true; 11499 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11500 if (First) { 11501 OS << "\t\t" "LoopDispositions: { "; 11502 First = false; 11503 } else { 11504 OS << ", "; 11505 } 11506 11507 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11508 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11509 } 11510 11511 for (auto *InnerL : depth_first(L)) { 11512 if (InnerL == L) 11513 continue; 11514 if (First) { 11515 OS << "\t\t" "LoopDispositions: { "; 11516 First = false; 11517 } else { 11518 OS << ", "; 11519 } 11520 11521 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11522 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11523 } 11524 11525 OS << " }"; 11526 } 11527 11528 OS << "\n"; 11529 } 11530 11531 OS << "Determining loop execution counts for: "; 11532 F.printAsOperand(OS, /*PrintType=*/false); 11533 OS << "\n"; 11534 for (Loop *I : LI) 11535 PrintLoopInfo(OS, &SE, I); 11536 } 11537 11538 ScalarEvolution::LoopDisposition 11539 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11540 auto &Values = LoopDispositions[S]; 11541 for (auto &V : Values) { 11542 if (V.getPointer() == L) 11543 return V.getInt(); 11544 } 11545 Values.emplace_back(L, LoopVariant); 11546 LoopDisposition D = computeLoopDisposition(S, L); 11547 auto &Values2 = LoopDispositions[S]; 11548 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11549 if (V.getPointer() == L) { 11550 V.setInt(D); 11551 break; 11552 } 11553 } 11554 return D; 11555 } 11556 11557 ScalarEvolution::LoopDisposition 11558 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11559 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11560 case scConstant: 11561 return LoopInvariant; 11562 case scTruncate: 11563 case scZeroExtend: 11564 case scSignExtend: 11565 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11566 case scAddRecExpr: { 11567 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11568 11569 // If L is the addrec's loop, it's computable. 11570 if (AR->getLoop() == L) 11571 return LoopComputable; 11572 11573 // Add recurrences are never invariant in the function-body (null loop). 11574 if (!L) 11575 return LoopVariant; 11576 11577 // Everything that is not defined at loop entry is variant. 11578 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11579 return LoopVariant; 11580 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11581 " dominate the contained loop's header?"); 11582 11583 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11584 if (AR->getLoop()->contains(L)) 11585 return LoopInvariant; 11586 11587 // This recurrence is variant w.r.t. L if any of its operands 11588 // are variant. 11589 for (auto *Op : AR->operands()) 11590 if (!isLoopInvariant(Op, L)) 11591 return LoopVariant; 11592 11593 // Otherwise it's loop-invariant. 11594 return LoopInvariant; 11595 } 11596 case scAddExpr: 11597 case scMulExpr: 11598 case scUMaxExpr: 11599 case scSMaxExpr: { 11600 bool HasVarying = false; 11601 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11602 LoopDisposition D = getLoopDisposition(Op, L); 11603 if (D == LoopVariant) 11604 return LoopVariant; 11605 if (D == LoopComputable) 11606 HasVarying = true; 11607 } 11608 return HasVarying ? LoopComputable : LoopInvariant; 11609 } 11610 case scUDivExpr: { 11611 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11612 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11613 if (LD == LoopVariant) 11614 return LoopVariant; 11615 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11616 if (RD == LoopVariant) 11617 return LoopVariant; 11618 return (LD == LoopInvariant && RD == LoopInvariant) ? 11619 LoopInvariant : LoopComputable; 11620 } 11621 case scUnknown: 11622 // All non-instruction values are loop invariant. All instructions are loop 11623 // invariant if they are not contained in the specified loop. 11624 // Instructions are never considered invariant in the function body 11625 // (null loop) because they are defined within the "loop". 11626 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11627 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 11628 return LoopInvariant; 11629 case scCouldNotCompute: 11630 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11631 } 11632 llvm_unreachable("Unknown SCEV kind!"); 11633 } 11634 11635 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11636 return getLoopDisposition(S, L) == LoopInvariant; 11637 } 11638 11639 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11640 return getLoopDisposition(S, L) == LoopComputable; 11641 } 11642 11643 ScalarEvolution::BlockDisposition 11644 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11645 auto &Values = BlockDispositions[S]; 11646 for (auto &V : Values) { 11647 if (V.getPointer() == BB) 11648 return V.getInt(); 11649 } 11650 Values.emplace_back(BB, DoesNotDominateBlock); 11651 BlockDisposition D = computeBlockDisposition(S, BB); 11652 auto &Values2 = BlockDispositions[S]; 11653 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11654 if (V.getPointer() == BB) { 11655 V.setInt(D); 11656 break; 11657 } 11658 } 11659 return D; 11660 } 11661 11662 ScalarEvolution::BlockDisposition 11663 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11664 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11665 case scConstant: 11666 return ProperlyDominatesBlock; 11667 case scTruncate: 11668 case scZeroExtend: 11669 case scSignExtend: 11670 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11671 case scAddRecExpr: { 11672 // This uses a "dominates" query instead of "properly dominates" query 11673 // to test for proper dominance too, because the instruction which 11674 // produces the addrec's value is a PHI, and a PHI effectively properly 11675 // dominates its entire containing block. 11676 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11677 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11678 return DoesNotDominateBlock; 11679 11680 // Fall through into SCEVNAryExpr handling. 11681 LLVM_FALLTHROUGH; 11682 } 11683 case scAddExpr: 11684 case scMulExpr: 11685 case scUMaxExpr: 11686 case scSMaxExpr: { 11687 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11688 bool Proper = true; 11689 for (const SCEV *NAryOp : NAry->operands()) { 11690 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11691 if (D == DoesNotDominateBlock) 11692 return DoesNotDominateBlock; 11693 if (D == DominatesBlock) 11694 Proper = false; 11695 } 11696 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11697 } 11698 case scUDivExpr: { 11699 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11700 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11701 BlockDisposition LD = getBlockDisposition(LHS, BB); 11702 if (LD == DoesNotDominateBlock) 11703 return DoesNotDominateBlock; 11704 BlockDisposition RD = getBlockDisposition(RHS, BB); 11705 if (RD == DoesNotDominateBlock) 11706 return DoesNotDominateBlock; 11707 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 11708 ProperlyDominatesBlock : DominatesBlock; 11709 } 11710 case scUnknown: 11711 if (Instruction *I = 11712 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 11713 if (I->getParent() == BB) 11714 return DominatesBlock; 11715 if (DT.properlyDominates(I->getParent(), BB)) 11716 return ProperlyDominatesBlock; 11717 return DoesNotDominateBlock; 11718 } 11719 return ProperlyDominatesBlock; 11720 case scCouldNotCompute: 11721 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11722 } 11723 llvm_unreachable("Unknown SCEV kind!"); 11724 } 11725 11726 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 11727 return getBlockDisposition(S, BB) >= DominatesBlock; 11728 } 11729 11730 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 11731 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 11732 } 11733 11734 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 11735 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 11736 } 11737 11738 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 11739 auto IsS = [&](const SCEV *X) { return S == X; }; 11740 auto ContainsS = [&](const SCEV *X) { 11741 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 11742 }; 11743 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 11744 } 11745 11746 void 11747 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 11748 ValuesAtScopes.erase(S); 11749 LoopDispositions.erase(S); 11750 BlockDispositions.erase(S); 11751 UnsignedRanges.erase(S); 11752 SignedRanges.erase(S); 11753 ExprValueMap.erase(S); 11754 HasRecMap.erase(S); 11755 MinTrailingZerosCache.erase(S); 11756 11757 for (auto I = PredicatedSCEVRewrites.begin(); 11758 I != PredicatedSCEVRewrites.end();) { 11759 std::pair<const SCEV *, const Loop *> Entry = I->first; 11760 if (Entry.first == S) 11761 PredicatedSCEVRewrites.erase(I++); 11762 else 11763 ++I; 11764 } 11765 11766 auto RemoveSCEVFromBackedgeMap = 11767 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 11768 for (auto I = Map.begin(), E = Map.end(); I != E;) { 11769 BackedgeTakenInfo &BEInfo = I->second; 11770 if (BEInfo.hasOperand(S, this)) { 11771 BEInfo.clear(); 11772 Map.erase(I++); 11773 } else 11774 ++I; 11775 } 11776 }; 11777 11778 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 11779 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 11780 } 11781 11782 void 11783 ScalarEvolution::getUsedLoops(const SCEV *S, 11784 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 11785 struct FindUsedLoops { 11786 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 11787 : LoopsUsed(LoopsUsed) {} 11788 SmallPtrSetImpl<const Loop *> &LoopsUsed; 11789 bool follow(const SCEV *S) { 11790 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 11791 LoopsUsed.insert(AR->getLoop()); 11792 return true; 11793 } 11794 11795 bool isDone() const { return false; } 11796 }; 11797 11798 FindUsedLoops F(LoopsUsed); 11799 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 11800 } 11801 11802 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 11803 SmallPtrSet<const Loop *, 8> LoopsUsed; 11804 getUsedLoops(S, LoopsUsed); 11805 for (auto *L : LoopsUsed) 11806 LoopUsers[L].push_back(S); 11807 } 11808 11809 void ScalarEvolution::verify() const { 11810 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11811 ScalarEvolution SE2(F, TLI, AC, DT, LI); 11812 11813 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 11814 11815 // Map's SCEV expressions from one ScalarEvolution "universe" to another. 11816 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 11817 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 11818 11819 const SCEV *visitConstant(const SCEVConstant *Constant) { 11820 return SE.getConstant(Constant->getAPInt()); 11821 } 11822 11823 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11824 return SE.getUnknown(Expr->getValue()); 11825 } 11826 11827 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 11828 return SE.getCouldNotCompute(); 11829 } 11830 }; 11831 11832 SCEVMapper SCM(SE2); 11833 11834 while (!LoopStack.empty()) { 11835 auto *L = LoopStack.pop_back_val(); 11836 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 11837 11838 auto *CurBECount = SCM.visit( 11839 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 11840 auto *NewBECount = SE2.getBackedgeTakenCount(L); 11841 11842 if (CurBECount == SE2.getCouldNotCompute() || 11843 NewBECount == SE2.getCouldNotCompute()) { 11844 // NB! This situation is legal, but is very suspicious -- whatever pass 11845 // change the loop to make a trip count go from could not compute to 11846 // computable or vice-versa *should have* invalidated SCEV. However, we 11847 // choose not to assert here (for now) since we don't want false 11848 // positives. 11849 continue; 11850 } 11851 11852 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 11853 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 11854 // not propagate undef aggressively). This means we can (and do) fail 11855 // verification in cases where a transform makes the trip count of a loop 11856 // go from "undef" to "undef+1" (say). The transform is fine, since in 11857 // both cases the loop iterates "undef" times, but SCEV thinks we 11858 // increased the trip count of the loop by 1 incorrectly. 11859 continue; 11860 } 11861 11862 if (SE.getTypeSizeInBits(CurBECount->getType()) > 11863 SE.getTypeSizeInBits(NewBECount->getType())) 11864 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 11865 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 11866 SE.getTypeSizeInBits(NewBECount->getType())) 11867 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 11868 11869 auto *ConstantDelta = 11870 dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount)); 11871 11872 if (ConstantDelta && ConstantDelta->getAPInt() != 0) { 11873 dbgs() << "Trip Count Changed!\n"; 11874 dbgs() << "Old: " << *CurBECount << "\n"; 11875 dbgs() << "New: " << *NewBECount << "\n"; 11876 dbgs() << "Delta: " << *ConstantDelta << "\n"; 11877 std::abort(); 11878 } 11879 } 11880 } 11881 11882 bool ScalarEvolution::invalidate( 11883 Function &F, const PreservedAnalyses &PA, 11884 FunctionAnalysisManager::Invalidator &Inv) { 11885 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 11886 // of its dependencies is invalidated. 11887 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 11888 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 11889 Inv.invalidate<AssumptionAnalysis>(F, PA) || 11890 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 11891 Inv.invalidate<LoopAnalysis>(F, PA); 11892 } 11893 11894 AnalysisKey ScalarEvolutionAnalysis::Key; 11895 11896 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 11897 FunctionAnalysisManager &AM) { 11898 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 11899 AM.getResult<AssumptionAnalysis>(F), 11900 AM.getResult<DominatorTreeAnalysis>(F), 11901 AM.getResult<LoopAnalysis>(F)); 11902 } 11903 11904 PreservedAnalyses 11905 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 11906 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 11907 return PreservedAnalyses::all(); 11908 } 11909 11910 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 11911 "Scalar Evolution Analysis", false, true) 11912 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 11913 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 11914 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 11915 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 11916 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 11917 "Scalar Evolution Analysis", false, true) 11918 11919 char ScalarEvolutionWrapperPass::ID = 0; 11920 11921 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 11922 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 11923 } 11924 11925 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 11926 SE.reset(new ScalarEvolution( 11927 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 11928 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 11929 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 11930 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 11931 return false; 11932 } 11933 11934 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 11935 11936 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 11937 SE->print(OS); 11938 } 11939 11940 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 11941 if (!VerifySCEV) 11942 return; 11943 11944 SE->verify(); 11945 } 11946 11947 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 11948 AU.setPreservesAll(); 11949 AU.addRequiredTransitive<AssumptionCacheTracker>(); 11950 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 11951 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 11952 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 11953 } 11954 11955 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 11956 const SCEV *RHS) { 11957 FoldingSetNodeID ID; 11958 assert(LHS->getType() == RHS->getType() && 11959 "Type mismatch between LHS and RHS"); 11960 // Unique this node based on the arguments 11961 ID.AddInteger(SCEVPredicate::P_Equal); 11962 ID.AddPointer(LHS); 11963 ID.AddPointer(RHS); 11964 void *IP = nullptr; 11965 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11966 return S; 11967 SCEVEqualPredicate *Eq = new (SCEVAllocator) 11968 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 11969 UniquePreds.InsertNode(Eq, IP); 11970 return Eq; 11971 } 11972 11973 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 11974 const SCEVAddRecExpr *AR, 11975 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 11976 FoldingSetNodeID ID; 11977 // Unique this node based on the arguments 11978 ID.AddInteger(SCEVPredicate::P_Wrap); 11979 ID.AddPointer(AR); 11980 ID.AddInteger(AddedFlags); 11981 void *IP = nullptr; 11982 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 11983 return S; 11984 auto *OF = new (SCEVAllocator) 11985 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 11986 UniquePreds.InsertNode(OF, IP); 11987 return OF; 11988 } 11989 11990 namespace { 11991 11992 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 11993 public: 11994 11995 /// Rewrites \p S in the context of a loop L and the SCEV predication 11996 /// infrastructure. 11997 /// 11998 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 11999 /// equivalences present in \p Pred. 12000 /// 12001 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12002 /// \p NewPreds such that the result will be an AddRecExpr. 12003 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12004 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12005 SCEVUnionPredicate *Pred) { 12006 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12007 return Rewriter.visit(S); 12008 } 12009 12010 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12011 if (Pred) { 12012 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12013 for (auto *Pred : ExprPreds) 12014 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12015 if (IPred->getLHS() == Expr) 12016 return IPred->getRHS(); 12017 } 12018 return convertToAddRecWithPreds(Expr); 12019 } 12020 12021 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12022 const SCEV *Operand = visit(Expr->getOperand()); 12023 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12024 if (AR && AR->getLoop() == L && AR->isAffine()) { 12025 // This couldn't be folded because the operand didn't have the nuw 12026 // flag. Add the nusw flag as an assumption that we could make. 12027 const SCEV *Step = AR->getStepRecurrence(SE); 12028 Type *Ty = Expr->getType(); 12029 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12030 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12031 SE.getSignExtendExpr(Step, Ty), L, 12032 AR->getNoWrapFlags()); 12033 } 12034 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12035 } 12036 12037 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12038 const SCEV *Operand = visit(Expr->getOperand()); 12039 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12040 if (AR && AR->getLoop() == L && AR->isAffine()) { 12041 // This couldn't be folded because the operand didn't have the nsw 12042 // flag. Add the nssw flag as an assumption that we could make. 12043 const SCEV *Step = AR->getStepRecurrence(SE); 12044 Type *Ty = Expr->getType(); 12045 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12046 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12047 SE.getSignExtendExpr(Step, Ty), L, 12048 AR->getNoWrapFlags()); 12049 } 12050 return SE.getSignExtendExpr(Operand, Expr->getType()); 12051 } 12052 12053 private: 12054 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12055 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12056 SCEVUnionPredicate *Pred) 12057 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12058 12059 bool addOverflowAssumption(const SCEVPredicate *P) { 12060 if (!NewPreds) { 12061 // Check if we've already made this assumption. 12062 return Pred && Pred->implies(P); 12063 } 12064 NewPreds->insert(P); 12065 return true; 12066 } 12067 12068 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12069 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12070 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12071 return addOverflowAssumption(A); 12072 } 12073 12074 // If \p Expr represents a PHINode, we try to see if it can be represented 12075 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12076 // to add this predicate as a runtime overflow check, we return the AddRec. 12077 // If \p Expr does not meet these conditions (is not a PHI node, or we 12078 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12079 // return \p Expr. 12080 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12081 if (!isa<PHINode>(Expr->getValue())) 12082 return Expr; 12083 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12084 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12085 if (!PredicatedRewrite) 12086 return Expr; 12087 for (auto *P : PredicatedRewrite->second){ 12088 // Wrap predicates from outer loops are not supported. 12089 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12090 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12091 if (L != AR->getLoop()) 12092 return Expr; 12093 } 12094 if (!addOverflowAssumption(P)) 12095 return Expr; 12096 } 12097 return PredicatedRewrite->first; 12098 } 12099 12100 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12101 SCEVUnionPredicate *Pred; 12102 const Loop *L; 12103 }; 12104 12105 } // end anonymous namespace 12106 12107 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12108 SCEVUnionPredicate &Preds) { 12109 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12110 } 12111 12112 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12113 const SCEV *S, const Loop *L, 12114 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12115 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12116 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12117 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12118 12119 if (!AddRec) 12120 return nullptr; 12121 12122 // Since the transformation was successful, we can now transfer the SCEV 12123 // predicates. 12124 for (auto *P : TransformPreds) 12125 Preds.insert(P); 12126 12127 return AddRec; 12128 } 12129 12130 /// SCEV predicates 12131 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12132 SCEVPredicateKind Kind) 12133 : FastID(ID), Kind(Kind) {} 12134 12135 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12136 const SCEV *LHS, const SCEV *RHS) 12137 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12138 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12139 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12140 } 12141 12142 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12143 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12144 12145 if (!Op) 12146 return false; 12147 12148 return Op->LHS == LHS && Op->RHS == RHS; 12149 } 12150 12151 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12152 12153 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12154 12155 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12156 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12157 } 12158 12159 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12160 const SCEVAddRecExpr *AR, 12161 IncrementWrapFlags Flags) 12162 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12163 12164 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12165 12166 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12167 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12168 12169 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12170 } 12171 12172 bool SCEVWrapPredicate::isAlwaysTrue() const { 12173 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12174 IncrementWrapFlags IFlags = Flags; 12175 12176 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12177 IFlags = clearFlags(IFlags, IncrementNSSW); 12178 12179 return IFlags == IncrementAnyWrap; 12180 } 12181 12182 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12183 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12184 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12185 OS << "<nusw>"; 12186 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12187 OS << "<nssw>"; 12188 OS << "\n"; 12189 } 12190 12191 SCEVWrapPredicate::IncrementWrapFlags 12192 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12193 ScalarEvolution &SE) { 12194 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12195 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12196 12197 // We can safely transfer the NSW flag as NSSW. 12198 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12199 ImpliedFlags = IncrementNSSW; 12200 12201 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12202 // If the increment is positive, the SCEV NUW flag will also imply the 12203 // WrapPredicate NUSW flag. 12204 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12205 if (Step->getValue()->getValue().isNonNegative()) 12206 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12207 } 12208 12209 return ImpliedFlags; 12210 } 12211 12212 /// Union predicates don't get cached so create a dummy set ID for it. 12213 SCEVUnionPredicate::SCEVUnionPredicate() 12214 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12215 12216 bool SCEVUnionPredicate::isAlwaysTrue() const { 12217 return all_of(Preds, 12218 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12219 } 12220 12221 ArrayRef<const SCEVPredicate *> 12222 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12223 auto I = SCEVToPreds.find(Expr); 12224 if (I == SCEVToPreds.end()) 12225 return ArrayRef<const SCEVPredicate *>(); 12226 return I->second; 12227 } 12228 12229 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12230 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12231 return all_of(Set->Preds, 12232 [this](const SCEVPredicate *I) { return this->implies(I); }); 12233 12234 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12235 if (ScevPredsIt == SCEVToPreds.end()) 12236 return false; 12237 auto &SCEVPreds = ScevPredsIt->second; 12238 12239 return any_of(SCEVPreds, 12240 [N](const SCEVPredicate *I) { return I->implies(N); }); 12241 } 12242 12243 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12244 12245 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12246 for (auto Pred : Preds) 12247 Pred->print(OS, Depth); 12248 } 12249 12250 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12251 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12252 for (auto Pred : Set->Preds) 12253 add(Pred); 12254 return; 12255 } 12256 12257 if (implies(N)) 12258 return; 12259 12260 const SCEV *Key = N->getExpr(); 12261 assert(Key && "Only SCEVUnionPredicate doesn't have an " 12262 " associated expression!"); 12263 12264 SCEVToPreds[Key].push_back(N); 12265 Preds.push_back(N); 12266 } 12267 12268 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 12269 Loop &L) 12270 : SE(SE), L(L) {} 12271 12272 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 12273 const SCEV *Expr = SE.getSCEV(V); 12274 RewriteEntry &Entry = RewriteMap[Expr]; 12275 12276 // If we already have an entry and the version matches, return it. 12277 if (Entry.second && Generation == Entry.first) 12278 return Entry.second; 12279 12280 // We found an entry but it's stale. Rewrite the stale entry 12281 // according to the current predicate. 12282 if (Entry.second) 12283 Expr = Entry.second; 12284 12285 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 12286 Entry = {Generation, NewSCEV}; 12287 12288 return NewSCEV; 12289 } 12290 12291 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 12292 if (!BackedgeCount) { 12293 SCEVUnionPredicate BackedgePred; 12294 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 12295 addPredicate(BackedgePred); 12296 } 12297 return BackedgeCount; 12298 } 12299 12300 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 12301 if (Preds.implies(&Pred)) 12302 return; 12303 Preds.add(&Pred); 12304 updateGeneration(); 12305 } 12306 12307 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 12308 return Preds; 12309 } 12310 12311 void PredicatedScalarEvolution::updateGeneration() { 12312 // If the generation number wrapped recompute everything. 12313 if (++Generation == 0) { 12314 for (auto &II : RewriteMap) { 12315 const SCEV *Rewritten = II.second.second; 12316 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 12317 } 12318 } 12319 } 12320 12321 void PredicatedScalarEvolution::setNoOverflow( 12322 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12323 const SCEV *Expr = getSCEV(V); 12324 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12325 12326 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 12327 12328 // Clear the statically implied flags. 12329 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 12330 addPredicate(*SE.getWrapPredicate(AR, Flags)); 12331 12332 auto II = FlagsMap.insert({V, Flags}); 12333 if (!II.second) 12334 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 12335 } 12336 12337 bool PredicatedScalarEvolution::hasNoOverflow( 12338 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12339 const SCEV *Expr = getSCEV(V); 12340 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12341 12342 Flags = SCEVWrapPredicate::clearFlags( 12343 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 12344 12345 auto II = FlagsMap.find(V); 12346 12347 if (II != FlagsMap.end()) 12348 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 12349 12350 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 12351 } 12352 12353 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 12354 const SCEV *Expr = this->getSCEV(V); 12355 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 12356 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 12357 12358 if (!New) 12359 return nullptr; 12360 12361 for (auto *P : NewPreds) 12362 Preds.add(P); 12363 12364 updateGeneration(); 12365 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 12366 return New; 12367 } 12368 12369 PredicatedScalarEvolution::PredicatedScalarEvolution( 12370 const PredicatedScalarEvolution &Init) 12371 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 12372 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 12373 for (const auto &I : Init.FlagsMap) 12374 FlagsMap.insert(I); 12375 } 12376 12377 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 12378 // For each block. 12379 for (auto *BB : L.getBlocks()) 12380 for (auto &I : *BB) { 12381 if (!SE.isSCEVable(I.getType())) 12382 continue; 12383 12384 auto *Expr = SE.getSCEV(&I); 12385 auto II = RewriteMap.find(Expr); 12386 12387 if (II == RewriteMap.end()) 12388 continue; 12389 12390 // Don't print things that are not interesting. 12391 if (II->second.second == Expr) 12392 continue; 12393 12394 OS.indent(Depth) << "[PSE]" << I << ":\n"; 12395 OS.indent(Depth + 2) << *Expr << "\n"; 12396 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 12397 } 12398 } 12399 12400 // Match the mathematical pattern A - (A / B) * B, where A and B can be 12401 // arbitrary expressions. 12402 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 12403 // 4, A / B becomes X / 8). 12404 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 12405 const SCEV *&RHS) { 12406 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 12407 if (Add == nullptr || Add->getNumOperands() != 2) 12408 return false; 12409 12410 const SCEV *A = Add->getOperand(1); 12411 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 12412 12413 if (Mul == nullptr) 12414 return false; 12415 12416 const auto MatchURemWithDivisor = [&](const SCEV *B) { 12417 // (SomeExpr + (-(SomeExpr / B) * B)). 12418 if (Expr == getURemExpr(A, B)) { 12419 LHS = A; 12420 RHS = B; 12421 return true; 12422 } 12423 return false; 12424 }; 12425 12426 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 12427 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 12428 return MatchURemWithDivisor(Mul->getOperand(1)) || 12429 MatchURemWithDivisor(Mul->getOperand(2)); 12430 12431 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 12432 if (Mul->getNumOperands() == 2) 12433 return MatchURemWithDivisor(Mul->getOperand(1)) || 12434 MatchURemWithDivisor(Mul->getOperand(0)) || 12435 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 12436 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 12437 return false; 12438 } 12439