//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
cl::desc("Maximum number of iterations SCEV will " 154 "symbolically execute a constant " 155 "derived loop"), 156 cl::init(100)); 157 158 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 159 static cl::opt<bool> VerifySCEV( 160 "verify-scev", cl::Hidden, 161 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 162 static cl::opt<bool> VerifySCEVStrict( 163 "verify-scev-strict", cl::Hidden, 164 cl::desc("Enable stricter verification with -verify-scev is passed")); 165 static cl::opt<bool> 166 VerifySCEVMap("verify-scev-maps", cl::Hidden, 167 cl::desc("Verify no dangling value in ScalarEvolution's " 168 "ExprValueMap (slow)")); 169 170 static cl::opt<bool> VerifyIR( 171 "scev-verify-ir", cl::Hidden, 172 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 173 cl::init(false)); 174 175 static cl::opt<unsigned> MulOpsInlineThreshold( 176 "scev-mulops-inline-threshold", cl::Hidden, 177 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 178 cl::init(32)); 179 180 static cl::opt<unsigned> AddOpsInlineThreshold( 181 "scev-addops-inline-threshold", cl::Hidden, 182 cl::desc("Threshold for inlining addition operands into a SCEV"), 183 cl::init(500)); 184 185 static cl::opt<unsigned> MaxSCEVCompareDepth( 186 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 188 cl::init(32)); 189 190 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 191 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 193 cl::init(2)); 194 195 static cl::opt<unsigned> MaxValueCompareDepth( 196 "scalar-evolution-max-value-compare-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive value complexity comparisons"), 198 cl::init(2)); 199 200 static cl::opt<unsigned> 201 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 202 cl::desc("Maximum depth of recursive arithmetics"), 203 cl::init(32)); 204 205 static cl::opt<unsigned> MaxConstantEvolvingDepth( 206 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 207 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 208 209 static cl::opt<unsigned> 210 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 211 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 212 cl::init(8)); 213 214 static cl::opt<unsigned> 215 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 216 cl::desc("Max coefficients in AddRec during evolving"), 217 cl::init(8)); 218 219 static cl::opt<unsigned> 220 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 221 cl::desc("Size of the expression which is considered huge"), 222 cl::init(4096)); 223 224 static cl::opt<bool> 225 ClassifyExpressions("scalar-evolution-classify-expressions", 226 cl::Hidden, cl::init(true), 227 cl::desc("When printing analysis, include information on every instruction")); 228 229 230 //===----------------------------------------------------------------------===// 231 // SCEV class definitions 232 //===----------------------------------------------------------------------===// 233 234 //===----------------------------------------------------------------------===// 235 // Implementation of the SCEV class. 
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVIntegralCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           unsigned SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
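  // For example, once operands are ordered consistently, the IR expressions
  // (a + b) and (b + a) both reach getAddExpr with the same operand order and
  // therefore fold to the single uniqued SCEV add expression.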
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences that
    // are used by one SCEV, so we can safely sort recurrences by loop header
    // dominance.  We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVIntegralCastExpr *LC = cast<SCEVIntegralCastExpr>(LHS);
    const SCEVIntegralCastExpr *RC = cast<SCEVIntegralCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
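  // For example, BC(It, 2) = It * (It - 1) / 2 and
  // BC(It, 3) = It * (It - 1) * (It - 2) / 6.
  //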
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires only W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }
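  // T starts at 1 to account for the factor of 2 contributed by i == 2, which
  // the loop above skips.  For example, for K = 4: 4! = 24 = 3 * 2^3, giving
  // OddFactorial = 3 and T = 3.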
  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
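  // For example, (trunc i64 42 to i32) folds directly to the i32 constant 42.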
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that ID was inserted into the cache during the recursion and
    // simplification above.  So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
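// For example, with i8 and a Step whose signed-range max is 5, the limit is
// -128 - 5 == 123 (mod 2^8): any recurrence value s< 123 can be incremented
// by Step without signed wrap.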
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
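// For instance, if {1,+,4} is known to be <nuw>, then {5,+,4} over the same
// loop is its post-increment sibling, and proving <nuw> for one typically
// transfers to the other.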
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
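// If a valid PreStart is found, this returns ext(Step) + ext(PreStart), which
// by the congruence above is equivalent to ext(Start) but exposes more folding
// opportunities; otherwise it falls back to extending Start directly.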
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
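// For example, for (13 + 8*x + 16*y): the non-constant part (8*x + 16*y) has
// 3 trailing zero bits, so D = 13 mod 2^3 = 5 and the residual expression
// (8 + 8*x + 16*y) keeps those 3 trailing zero bits; adding D = 5 < 2^3 back
// cannot carry into them, so the top-level addition cannot wrap.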
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}

const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
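    // For instance, if x is an i32 already known to be u< 256, then
    // (zext i8 (trunc i32 x to i8) to i32) is just x.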
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type.  The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
1497 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1498 // Return the expression with the addrec on the outside. 1499 return getAddRecExpr( 1500 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1501 Depth + 1), 1502 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1503 AR->getNoWrapFlags()); 1504 } 1505 // Similar to above, only this time treat the step value as signed. 1506 // This covers loops that count down. 1507 OperandExtendedAdd = 1508 getAddExpr(WideStart, 1509 getMulExpr(WideMaxBECount, 1510 getSignExtendExpr(Step, WideTy, Depth + 1), 1511 SCEV::FlagAnyWrap, Depth + 1), 1512 SCEV::FlagAnyWrap, Depth + 1); 1513 if (ZAdd == OperandExtendedAdd) { 1514 // Cache knowledge of AR NW, which is propagated to this AddRec. 1515 // Negative step causes unsigned wrap, but it still can't self-wrap. 1516 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1517 // Return the expression with the addrec on the outside. 1518 return getAddRecExpr( 1519 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1520 Depth + 1), 1521 getSignExtendExpr(Step, Ty, Depth + 1), L, 1522 AR->getNoWrapFlags()); 1523 } 1524 } 1525 } 1526 1527 // Normally, in the cases we can prove no-overflow via a 1528 // backedge guarding condition, we can also compute a backedge 1529 // taken count for the loop. The exceptions are assumptions and 1530 // guards present in the loop -- SCEV is not great at exploiting 1531 // these to compute max backedge taken counts, but can still use 1532 // these to prove lack of overflow. Use this fact to avoid 1533 // doing extra work that may not pay off. 1534 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1535 !AC.assumptions().empty()) { 1536 // If the backedge is guarded by a comparison with the pre-inc 1537 // value the addrec is safe. Also, if the entry is guarded by 1538 // a comparison with the start value and the backedge is 1539 // guarded by a comparison with the post-inc value, the addrec 1540 // is safe. 1541 if (isKnownPositive(Step)) { 1542 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1543 getUnsignedRangeMax(Step)); 1544 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1545 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1546 // Cache knowledge of AR NUW, which is propagated to this 1547 // AddRec. 1548 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1549 // Return the expression with the addrec on the outside. 1550 return getAddRecExpr( 1551 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1552 Depth + 1), 1553 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1554 AR->getNoWrapFlags()); 1555 } 1556 } else if (isKnownNegative(Step)) { 1557 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1558 getSignedRangeMin(Step)); 1559 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1560 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1561 // Cache knowledge of AR NW, which is propagated to this 1562 // AddRec. Negative step causes unsigned wrap, but it 1563 // still can't self-wrap. 1564 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1565 // Return the expression with the addrec on the outside. 
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition cannot overflow in the unsigned sense then we can,
      // by definition, commute the zero extension with the addition
      // operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount; see the LoadStoreVectorizer
    // pass.
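    // A worked instance (numbers chosen purely for illustration): for
    // zext(5 + 4 * X), the non-constant part 4 * X has at least two known
    // trailing zero bits, so D keeps the low two bits of 5, i.e. D = 1,
    // and we rewrite to zext(1) + zext(4 + 4 * X). Because the residual's
    // low bits are zero, adding D < 2^TZ back cannot carry, so the outer
    // addition cannot wrap.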
1633 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1634 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1635 if (D != 0) { 1636 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1637 const SCEV *SResidual = 1638 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1639 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1640 return getAddExpr(SZExtD, SZExtR, 1641 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1642 Depth + 1); 1643 } 1644 } 1645 } 1646 1647 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1648 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1649 if (SM->hasNoUnsignedWrap()) { 1650 // If the multiply does not unsign overflow then we can, by definition, 1651 // commute the zero extension with the multiply operation. 1652 SmallVector<const SCEV *, 4> Ops; 1653 for (const auto *Op : SM->operands()) 1654 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1655 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1656 } 1657 1658 // zext(2^K * (trunc X to iN)) to iM -> 1659 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1660 // 1661 // Proof: 1662 // 1663 // zext(2^K * (trunc X to iN)) to iM 1664 // = zext((trunc X to iN) << K) to iM 1665 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1666 // (because shl removes the top K bits) 1667 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1668 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1669 // 1670 if (SM->getNumOperands() == 2) 1671 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1672 if (MulLHS->getAPInt().isPowerOf2()) 1673 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1674 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1675 MulLHS->getAPInt().logBase2(); 1676 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1677 return getMulExpr( 1678 getZeroExtendExpr(MulLHS, Ty), 1679 getZeroExtendExpr( 1680 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1681 SCEV::FlagNUW, Depth + 1); 1682 } 1683 } 1684 1685 // The cast wasn't folded; create an explicit cast node. 1686 // Recompute the insert position, as it may have been invalidated. 1687 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1688 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1689 Op, Ty); 1690 UniqueSCEVs.InsertNode(S, IP); 1691 addToLoopUseLists(S); 1692 return S; 1693 } 1694 1695 const SCEV * 1696 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1697 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1698 "This is not an extending conversion!"); 1699 assert(isSCEVable(Ty) && 1700 "This is not a conversion to a SCEVable type!"); 1701 Ty = getEffectiveSCEVType(Ty); 1702 1703 // Fold if the operand is constant. 1704 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1705 return getConstant( 1706 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1707 1708 // sext(sext(x)) --> sext(x) 1709 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1710 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1711 1712 // sext(zext(x)) --> zext(x) 1713 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1714 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1715 1716 // Before doing any expensive analysis, check to see if we've already 1717 // computed a SCEV for this Op and Ty. 
1718 FoldingSetNodeID ID; 1719 ID.AddInteger(scSignExtend); 1720 ID.AddPointer(Op); 1721 ID.AddPointer(Ty); 1722 void *IP = nullptr; 1723 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1724 // Limit recursion depth. 1725 if (Depth > MaxCastDepth) { 1726 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1727 Op, Ty); 1728 UniqueSCEVs.InsertNode(S, IP); 1729 addToLoopUseLists(S); 1730 return S; 1731 } 1732 1733 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1734 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1735 // It's possible the bits taken off by the truncate were all sign bits. If 1736 // so, we should be able to simplify this further. 1737 const SCEV *X = ST->getOperand(); 1738 ConstantRange CR = getSignedRange(X); 1739 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1740 unsigned NewBits = getTypeSizeInBits(Ty); 1741 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1742 CR.sextOrTrunc(NewBits))) 1743 return getTruncateOrSignExtend(X, Ty, Depth); 1744 } 1745 1746 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1747 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1748 if (SA->hasNoSignedWrap()) { 1749 // If the addition does not sign overflow then we can, by definition, 1750 // commute the sign extension with the addition operation. 1751 SmallVector<const SCEV *, 4> Ops; 1752 for (const auto *Op : SA->operands()) 1753 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1754 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1755 } 1756 1757 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1758 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1759 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1760 // 1761 // For instance, this will bring two seemingly different expressions: 1762 // 1 + sext(5 + 20 * %x + 24 * %y) and 1763 // sext(6 + 20 * %x + 24 * %y) 1764 // to the same form: 1765 // 2 + sext(4 + 20 * %x + 24 * %y) 1766 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1767 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1768 if (D != 0) { 1769 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1770 const SCEV *SResidual = 1771 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1772 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1773 return getAddExpr(SSExtD, SSExtR, 1774 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1775 Depth + 1); 1776 } 1777 } 1778 } 1779 // If the input value is a chrec scev, and we can prove that the value 1780 // did not overflow the old, smaller, value, we can sign extend all of the 1781 // operands (often constants). This allows analysis of something like 1782 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1783 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1784 if (AR->isAffine()) { 1785 const SCEV *Start = AR->getStart(); 1786 const SCEV *Step = AR->getStepRecurrence(*this); 1787 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1788 const Loop *L = AR->getLoop(); 1789 1790 if (!AR->hasNoSignedWrap()) { 1791 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1792 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1793 } 1794 1795 // If we have special knowledge that this addrec won't overflow, 1796 // we don't need to do any further analysis. 
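      // (A hedged example: sext of {0,+,1}<nsw> from i8 to i32 takes this
      // fast path and yields {0,+,1}<nsw> directly over i32, extending the
      // start and the step separately.)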
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
1869 return getAddRecExpr( 1870 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 1871 Depth + 1), 1872 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1873 AR->getNoWrapFlags()); 1874 } 1875 } 1876 } 1877 1878 // Normally, in the cases we can prove no-overflow via a 1879 // backedge guarding condition, we can also compute a backedge 1880 // taken count for the loop. The exceptions are assumptions and 1881 // guards present in the loop -- SCEV is not great at exploiting 1882 // these to compute max backedge taken counts, but can still use 1883 // these to prove lack of overflow. Use this fact to avoid 1884 // doing extra work that may not pay off. 1885 1886 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1887 !AC.assumptions().empty()) { 1888 // If the backedge is guarded by a comparison with the pre-inc 1889 // value the addrec is safe. Also, if the entry is guarded by 1890 // a comparison with the start value and the backedge is 1891 // guarded by a comparison with the post-inc value, the addrec 1892 // is safe. 1893 ICmpInst::Predicate Pred; 1894 const SCEV *OverflowLimit = 1895 getSignedOverflowLimitForStep(Step, &Pred, this); 1896 if (OverflowLimit && 1897 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1898 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 1899 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 1900 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1901 return getAddRecExpr( 1902 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1903 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1904 } 1905 } 1906 1907 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 1908 // if D + (C - D + Step * n) could be proven to not signed wrap 1909 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1910 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1911 const APInt &C = SC->getAPInt(); 1912 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1913 if (D != 0) { 1914 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1915 const SCEV *SResidual = 1916 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1917 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1918 return getAddExpr(SSExtD, SSExtR, 1919 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1920 Depth + 1); 1921 } 1922 } 1923 1924 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 1925 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1926 return getAddRecExpr( 1927 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1928 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1929 } 1930 } 1931 1932 // If the input value is provably positive and we could not simplify 1933 // away the sext build a zext instead. 1934 if (isKnownNonNegative(Op)) 1935 return getZeroExtendExpr(Op, Ty, Depth + 1); 1936 1937 // The cast wasn't folded; create an explicit cast node. 1938 // Recompute the insert position, as it may have been invalidated. 1939 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1940 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1941 Op, Ty); 1942 UniqueSCEVs.InsertNode(S, IP); 1943 addToLoopUseLists(S); 1944 return S; 1945 } 1946 1947 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1948 /// unspecified bits out to the given type. 
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(),
                                         Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `Flags' as can't-wrap behavior. Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
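  // (Rationale, stated informally: when every operand is non-negative, the
  // signed and unsigned views of the sum coincide, and an <nsw> result stays
  // within [0, signed-max], which can never wrap an unsigned computation.)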
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't overflow in
    // the signed sense.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't overflow in
    // the unsigned sense.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}

bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}

/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit the recursion depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, Flags);

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
    static_cast<SCEVAddExpr *>(S)->setNoWrapFlags(Flags);
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. e.g., trunc(n)*trunc(x) + trunc(m)*trunc(y) --> trunc(n*x + m*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an add of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the add then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
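    // (A hedged example of the payoff: trunc(X to i8) + (-1 * trunc(X to i8))
    // widens to X + (-1 * X), which folds to the constant 0, so the whole sum
    // becomes trunc(0 to i8) = 0 -- something the truncated form would not
    // necessarily expose directly.)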
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                  dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap,
                                        Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile
      // to regenerate the operands list. Group the operands by constant
      // scale, to avoid multiplying by the same constant scale multiple
      // times.
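      // (Illustrative: 2*x + 4*y + 2*z regroups to 2*(x + z) + 4*y, so each
      // distinct scale is multiplied through exactly once.)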
2345 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2346 for (const SCEV *NewOp : NewOps) 2347 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2348 // Re-generate the operands list. 2349 Ops.clear(); 2350 if (AccumulatedConstant != 0) 2351 Ops.push_back(getConstant(AccumulatedConstant)); 2352 for (auto &MulOp : MulOpLists) 2353 if (MulOp.first != 0) 2354 Ops.push_back(getMulExpr( 2355 getConstant(MulOp.first), 2356 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2357 SCEV::FlagAnyWrap, Depth + 1)); 2358 if (Ops.empty()) 2359 return getZero(Ty); 2360 if (Ops.size() == 1) 2361 return Ops[0]; 2362 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2363 } 2364 } 2365 2366 // If we are adding something to a multiply expression, make sure the 2367 // something is not already an operand of the multiply. If so, merge it into 2368 // the multiply. 2369 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2370 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2371 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2372 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2373 if (isa<SCEVConstant>(MulOpSCEV)) 2374 continue; 2375 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2376 if (MulOpSCEV == Ops[AddOp]) { 2377 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2378 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2379 if (Mul->getNumOperands() != 2) { 2380 // If the multiply has more than two operands, we must get the 2381 // Y*Z term. 2382 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2383 Mul->op_begin()+MulOp); 2384 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2385 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2386 } 2387 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2388 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2389 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2390 SCEV::FlagAnyWrap, Depth + 1); 2391 if (Ops.size() == 2) return OuterMul; 2392 if (AddOp < Idx) { 2393 Ops.erase(Ops.begin()+AddOp); 2394 Ops.erase(Ops.begin()+Idx-1); 2395 } else { 2396 Ops.erase(Ops.begin()+Idx); 2397 Ops.erase(Ops.begin()+AddOp-1); 2398 } 2399 Ops.push_back(OuterMul); 2400 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2401 } 2402 2403 // Check this multiply against other multiplies being added together. 2404 for (unsigned OtherMulIdx = Idx+1; 2405 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2406 ++OtherMulIdx) { 2407 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2408 // If MulOp occurs in OtherMul, we can fold the two multiplies 2409 // together. 
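        // (E.g., a sketch: X + (3*Y*Z) + (5*Y*W) with common operand Y folds
        // to X + Y*(3*Z + 5*W), matching the rewrite below.)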
2410 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2411 OMulOp != e; ++OMulOp) 2412 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2413 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2414 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2415 if (Mul->getNumOperands() != 2) { 2416 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2417 Mul->op_begin()+MulOp); 2418 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2419 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2420 } 2421 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2422 if (OtherMul->getNumOperands() != 2) { 2423 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2424 OtherMul->op_begin()+OMulOp); 2425 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2426 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2427 } 2428 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2429 const SCEV *InnerMulSum = 2430 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2431 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2432 SCEV::FlagAnyWrap, Depth + 1); 2433 if (Ops.size() == 2) return OuterMul; 2434 Ops.erase(Ops.begin()+Idx); 2435 Ops.erase(Ops.begin()+OtherMulIdx-1); 2436 Ops.push_back(OuterMul); 2437 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2438 } 2439 } 2440 } 2441 } 2442 2443 // If there are any add recurrences in the operands list, see if any other 2444 // added values are loop invariant. If so, we can fold them into the 2445 // recurrence. 2446 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2447 ++Idx; 2448 2449 // Scan over all recurrences, trying to fold loop invariants into them. 2450 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2451 // Scan all of the other operands to this add and add them to the vector if 2452 // they are loop invariant w.r.t. the recurrence. 2453 SmallVector<const SCEV *, 8> LIOps; 2454 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2455 const Loop *AddRecLoop = AddRec->getLoop(); 2456 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2457 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2458 LIOps.push_back(Ops[i]); 2459 Ops.erase(Ops.begin()+i); 2460 --i; --e; 2461 } 2462 2463 // If we found some loop invariants, fold them into the recurrence. 2464 if (!LIOps.empty()) { 2465 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2466 LIOps.push_back(AddRec->getStart()); 2467 2468 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2469 AddRec->op_end()); 2470 // This follows from the fact that the no-wrap flags on the outer add 2471 // expression are applicable on the 0th iteration, when the add recurrence 2472 // will be equal to its start value. 2473 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2474 2475 // Build the new addrec. Propagate the NUW and NSW flags if both the 2476 // outer add and the inner addrec are guaranteed to have no overflow. 2477 // Always propagate NW. 2478 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2479 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2480 2481 // If all of the other operands were loop invariant, we are done. 2482 if (Ops.size() == 1) return NewRec; 2483 2484 // Otherwise, add the folded AddRec by the non-invariant parts. 
2485 for (unsigned i = 0;; ++i) 2486 if (Ops[i] == AddRec) { 2487 Ops[i] = NewRec; 2488 break; 2489 } 2490 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2491 } 2492 2493 // Okay, if there weren't any loop invariants to be folded, check to see if 2494 // there are multiple AddRec's with the same loop induction variable being 2495 // added together. If so, we can fold them. 2496 for (unsigned OtherIdx = Idx+1; 2497 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2498 ++OtherIdx) { 2499 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2500 // so that the 1st found AddRecExpr is dominated by all others. 2501 assert(DT.dominates( 2502 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2503 AddRec->getLoop()->getHeader()) && 2504 "AddRecExprs are not sorted in reverse dominance order?"); 2505 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2506 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2507 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2508 AddRec->op_end()); 2509 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2510 ++OtherIdx) { 2511 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2512 if (OtherAddRec->getLoop() == AddRecLoop) { 2513 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2514 i != e; ++i) { 2515 if (i >= AddRecOps.size()) { 2516 AddRecOps.append(OtherAddRec->op_begin()+i, 2517 OtherAddRec->op_end()); 2518 break; 2519 } 2520 SmallVector<const SCEV *, 2> TwoOps = { 2521 AddRecOps[i], OtherAddRec->getOperand(i)}; 2522 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2523 } 2524 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2525 } 2526 } 2527 // Step size has changed, so we cannot guarantee no self-wraparound. 2528 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2529 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2530 } 2531 } 2532 2533 // Otherwise couldn't fold anything into this recurrence. Move onto the 2534 // next one. 2535 } 2536 2537 // Okay, it looks like we really DO need an add expr. Check to see if we 2538 // already have one, otherwise create a new one. 
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //   n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we multiply by the next term of the numerator and
  // divide by the next term of the denominator, taken in increasing order.
  // Each partial result is itself a binomial coefficient, so the division
  // always produces an integral result, and this ordering helps reduce the
  // chance of overflow in the intermediate computations. However, we can
  // still overflow even when the final result would fit.
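  // A worked instance (illustrative): Choose(5, 2, Ov) computes
  //   r = 1 * 5 / 1 = 5,  then  r = 5 * 4 / 2 = 10,
  // and each partial r equals C(5, i), which is why every division is exact.
  // E.g.:  bool Ov = false;  uint64_t R = Choose(5, 2, Ov);  // R == 10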

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}

/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}

/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit the recursion depth, but fold all-constant expressions.
  // `Ops` is sorted, so it's enough to check just the last one.
  if ((Depth > MaxArithDepth || hasHugeExpression(Ops)) &&
      !isa<SCEVConstant>(Ops.back()))
    return getOrCreateMulExpr(Ops, Flags);

  if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
    static_cast<SCEVMulExpr *>(S)->setNoWrapFlags(Flags);
    return S;
  }

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    if (Ops.size() == 2)
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
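    // (Illustrative sketch: in x * {1,+,2}<L> with x invariant in L, the
    // invariant factor distributes over start and step to give {x,+,2*x}<L>
    // below.)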
2798 SmallVector<const SCEV *, 8> LIOps; 2799 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2800 const Loop *AddRecLoop = AddRec->getLoop(); 2801 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2802 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2803 LIOps.push_back(Ops[i]); 2804 Ops.erase(Ops.begin()+i); 2805 --i; --e; 2806 } 2807 2808 // If we found some loop invariants, fold them into the recurrence. 2809 if (!LIOps.empty()) { 2810 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 2811 SmallVector<const SCEV *, 4> NewOps; 2812 NewOps.reserve(AddRec->getNumOperands()); 2813 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 2814 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 2815 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 2816 SCEV::FlagAnyWrap, Depth + 1)); 2817 2818 // Build the new addrec. Propagate the NUW and NSW flags if both the 2819 // outer mul and the inner addrec are guaranteed to have no overflow. 2820 // 2821 // No self-wrap cannot be guaranteed after changing the step size, but 2822 // will be inferred if either NUW or NSW is true. 2823 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 2824 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 2825 2826 // If all of the other operands were loop invariant, we are done. 2827 if (Ops.size() == 1) return NewRec; 2828 2829 // Otherwise, multiply the folded AddRec by the non-invariant parts. 2830 for (unsigned i = 0;; ++i) 2831 if (Ops[i] == AddRec) { 2832 Ops[i] = NewRec; 2833 break; 2834 } 2835 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2836 } 2837 2838 // Okay, if there weren't any loop invariants to be folded, check to see 2839 // if there are multiple AddRec's with the same loop induction variable 2840 // being multiplied together. If so, we can fold them. 2841 2842 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 2843 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 2844 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 2845 // ]]],+,...up to x=2n}. 2846 // Note that the arguments to choose() are always integers with values 2847 // known at compile time, never SCEV objects. 2848 // 2849 // The implementation avoids pointless extra computations when the two 2850 // addrec's are of different length (mathematically, it's equivalent to 2851 // an infinite stream of zeros on the right). 2852 bool OpsModified = false; 2853 for (unsigned OtherIdx = Idx+1; 2854 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2855 ++OtherIdx) { 2856 const SCEVAddRecExpr *OtherAddRec = 2857 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2858 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 2859 continue; 2860 2861 // Limit max number of arguments to avoid creation of unreasonably big 2862 // SCEVAddRecs with very complex operands. 
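      // (For reference, a worked instance of the product formula above in
      // the affine case: {a,+,b}<L> * {c,+,d}<L>
      //   = {a*c, +, a*d + b*c + b*d, +, 2*b*d}<L>;
      // the guard below merely bounds how large such products may grow.)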
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV*, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector <const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the generic identity:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
2950 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 2951 const SCEV *RHS) { 2952 assert(getEffectiveSCEVType(LHS->getType()) == 2953 getEffectiveSCEVType(RHS->getType()) && 2954 "SCEVUDivExpr operand types don't match!"); 2955 2956 FoldingSetNodeID ID; 2957 ID.AddInteger(scUDivExpr); 2958 ID.AddPointer(LHS); 2959 ID.AddPointer(RHS); 2960 void *IP = nullptr; 2961 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 2962 return S; 2963 2964 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 2965 if (RHSC->getValue()->isOne()) 2966 return LHS; // X udiv 1 --> X 2967 // If the denominator is zero, the result of the udiv is undefined. Don't 2968 // try to analyze it, because the resolution chosen here may differ from 2969 // the resolution chosen in other parts of the compiler. 2970 if (!RHSC->getValue()->isZero()) { 2971 // Determine if the division can be folded into the operands of 2972 // its LHS. 2973 // TODO: Generalize this to non-constants by using known-bits information. 2974 Type *Ty = LHS->getType(); 2975 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 2976 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 2977 // For non-power-of-two values, effectively round the value up to the 2978 // nearest power of two. 2979 if (!RHSC->getAPInt().isPowerOf2()) 2980 ++MaxShiftAmt; 2981 IntegerType *ExtTy = 2982 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 2983 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 2984 if (const SCEVConstant *Step = 2985 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 2986 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 2987 const APInt &StepInt = Step->getAPInt(); 2988 const APInt &DivInt = RHSC->getAPInt(); 2989 if (!StepInt.urem(DivInt) && 2990 getZeroExtendExpr(AR, ExtTy) == 2991 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 2992 getZeroExtendExpr(Step, ExtTy), 2993 AR->getLoop(), SCEV::FlagAnyWrap)) { 2994 SmallVector<const SCEV *, 4> Operands; 2995 for (const SCEV *Op : AR->operands()) 2996 Operands.push_back(getUDivExpr(Op, RHS)); 2997 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 2998 } 2999 // Get a canonical UDivExpr for a recurrence. 3000 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 3001 // We can currently only fold X%N if X is constant. 3002 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3003 if (StartC && !DivInt.urem(StepInt) && 3004 getZeroExtendExpr(AR, ExtTy) == 3005 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3006 getZeroExtendExpr(Step, ExtTy), 3007 AR->getLoop(), SCEV::FlagAnyWrap)) { 3008 const APInt &StartInt = StartC->getAPInt(); 3009 const APInt &StartRem = StartInt.urem(StepInt); 3010 if (StartRem != 0) { 3011 const SCEV *NewLHS = 3012 getAddRecExpr(getConstant(StartInt - StartRem), Step, 3013 AR->getLoop(), SCEV::FlagNW); 3014 if (LHS != NewLHS) { 3015 LHS = NewLHS; 3016 3017 // Reset the ID to include the new LHS, and check if it is 3018 // already cached. 3019 ID.clear(); 3020 ID.AddInteger(scUDivExpr); 3021 ID.AddPointer(LHS); 3022 ID.AddPointer(RHS); 3023 IP = nullptr; 3024 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3025 return S; 3026 } 3027 } 3028 } 3029 } 3030 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
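// For instance (editor's sketch): (4 * %n) /u 2 can fold to 2 * %n. The
// zero-extension comparison below is what establishes "safe": the multiply
// must compute the same value in a wider type, so no bits that the udiv
// would otherwise have cleared were lost to wrapping in the original type.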
3031 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3032 SmallVector<const SCEV *, 4> Operands; 3033 for (const SCEV *Op : M->operands()) 3034 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3035 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3036 // Find an operand that's safely divisible. 3037 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3038 const SCEV *Op = M->getOperand(i); 3039 const SCEV *Div = getUDivExpr(Op, RHSC); 3040 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3041 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 3042 M->op_end()); 3043 Operands[i] = Div; 3044 return getMulExpr(Operands); 3045 } 3046 } 3047 } 3048 3049 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3050 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3051 if (auto *DivisorConstant = 3052 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3053 bool Overflow = false; 3054 APInt NewRHS = 3055 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3056 if (Overflow) { 3057 return getConstant(RHSC->getType(), 0, false); 3058 } 3059 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3060 } 3061 } 3062 3063 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3064 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3065 SmallVector<const SCEV *, 4> Operands; 3066 for (const SCEV *Op : A->operands()) 3067 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3068 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3069 Operands.clear(); 3070 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3071 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3072 if (isa<SCEVUDivExpr>(Op) || 3073 getMulExpr(Op, RHS) != A->getOperand(i)) 3074 break; 3075 Operands.push_back(Op); 3076 } 3077 if (Operands.size() == A->getNumOperands()) 3078 return getAddExpr(Operands); 3079 } 3080 } 3081 3082 // Fold if both operands are constant. 3083 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3084 Constant *LHSCV = LHSC->getValue(); 3085 Constant *RHSCV = RHSC->getValue(); 3086 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3087 RHSCV))); 3088 } 3089 } 3090 } 3091 3092 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3093 // changes). Make sure we get a new one. 3094 IP = nullptr; 3095 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3096 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3097 LHS, RHS); 3098 UniqueSCEVs.InsertNode(S, IP); 3099 addToLoopUseLists(S); 3100 return S; 3101 } 3102 3103 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3104 APInt A = C1->getAPInt().abs(); 3105 APInt B = C2->getAPInt().abs(); 3106 uint32_t ABW = A.getBitWidth(); 3107 uint32_t BBW = B.getBitWidth(); 3108 3109 if (ABW > BBW) 3110 B = B.zext(ABW); 3111 else if (ABW < BBW) 3112 A = A.zext(BBW); 3113 3114 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3115 } 3116 3117 /// Get a canonical unsigned division expression, or something simpler if 3118 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3119 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3120 /// it's not exact because the udiv may be clearing bits. 
3121 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3122 const SCEV *RHS) { 3123 // TODO: we could try to find factors in all sorts of things, but for now we 3124 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3125 // end of this file for inspiration. 3126 3127 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3128 if (!Mul || !Mul->hasNoUnsignedWrap()) 3129 return getUDivExpr(LHS, RHS); 3130 3131 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3132 // If the mulexpr multiplies by a constant, then that constant must be the 3133 // first element of the mulexpr. 3134 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3135 if (LHSCst == RHSCst) { 3136 SmallVector<const SCEV *, 2> Operands; 3137 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3138 return getMulExpr(Operands); 3139 } 3140 3141 // We can't just assume that LHSCst divides RHSCst cleanly; it could be 3142 // that there's a factor provided by one of the other terms. We need to 3143 // check. 3144 APInt Factor = gcd(LHSCst, RHSCst); 3145 if (!Factor.isIntN(1)) { 3146 LHSCst = 3147 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3148 RHSCst = 3149 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3150 SmallVector<const SCEV *, 2> Operands; 3151 Operands.push_back(LHSCst); 3152 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3153 LHS = getMulExpr(Operands); 3154 RHS = RHSCst; 3155 Mul = dyn_cast<SCEVMulExpr>(LHS); 3156 if (!Mul) 3157 return getUDivExactExpr(LHS, RHS); 3158 } 3159 } 3160 } 3161 3162 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3163 if (Mul->getOperand(i) == RHS) { 3164 SmallVector<const SCEV *, 2> Operands; 3165 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3166 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3167 return getMulExpr(Operands); 3168 } 3169 } 3170 3171 return getUDivExpr(LHS, RHS); 3172 } 3173 3174 /// Get an add recurrence expression for the specified loop. Simplify the 3175 /// expression as much as possible. 3176 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3177 const Loop *L, 3178 SCEV::NoWrapFlags Flags) { 3179 SmallVector<const SCEV *, 4> Operands; 3180 Operands.push_back(Start); 3181 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3182 if (StepChrec->getLoop() == L) { 3183 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3184 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3185 } 3186 3187 Operands.push_back(Step); 3188 return getAddRecExpr(Operands, L, Flags); 3189 } 3190 3191 /// Get an add recurrence expression for the specified loop. Simplify the 3192 /// expression as much as possible.
3193 const SCEV * 3194 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 3195 const Loop *L, SCEV::NoWrapFlags Flags) { 3196 if (Operands.size() == 1) return Operands[0]; 3197 #ifndef NDEBUG 3198 Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 3199 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 3200 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 3201 "SCEVAddRecExpr operand types don't match!"); 3202 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3203 assert(isLoopInvariant(Operands[i], L) && 3204 "SCEVAddRecExpr operand is not loop-invariant!"); 3205 #endif 3206 3207 if (Operands.back()->isZero()) { 3208 Operands.pop_back(); 3209 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X 3210 } 3211 3212 // It's tempting to call getConstantMaxBackedgeTakenCount here and 3213 // use that information to infer NUW and NSW flags. However, computing a 3214 // BE count requires calling getAddRecExpr, so we may not yet have a 3215 // meaningful BE count at this point (and if we don't, we'd be stuck 3216 // with a SCEVCouldNotCompute as the cached BE count). 3217 3218 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 3219 3220 // Canonicalize nested AddRecs by nesting them in order of loop depth. 3221 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3222 const Loop *NestedLoop = NestedAR->getLoop(); 3223 if (L->contains(NestedLoop) 3224 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3225 : (!NestedLoop->contains(L) && 3226 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3227 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3228 NestedAR->op_end()); 3229 Operands[0] = NestedAR->getStart(); 3230 // AddRecs require their operands be loop-invariant with respect to their 3231 // loops. Don't perform this transformation if it would break this 3232 // requirement. 3233 bool AllInvariant = all_of( 3234 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3235 3236 if (AllInvariant) { 3237 // Create a recurrence for the outer loop with the same step size. 3238 // 3239 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3240 // inner recurrence has the same property. 3241 SCEV::NoWrapFlags OuterFlags = 3242 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3243 3244 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3245 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3246 return isLoopInvariant(Op, NestedLoop); 3247 }); 3248 3249 if (AllInvariant) { 3250 // Ok, both add recurrences are valid after the transformation. 3251 // 3252 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3253 // the outer recurrence has the same property. 3254 SCEV::NoWrapFlags InnerFlags = 3255 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3256 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3257 } 3258 } 3259 // Reset Operands to its original state. 3260 Operands[0] = NestedAR; 3261 } 3262 } 3263 3264 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3265 // already have one, otherwise create a new one.
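// Recap of the nesting canonicalization above (editor's sketch): a request
// for {{0,+,1}<L2>,+,%step}<L1>, where L1 is the outer loop, is re-nested
// as {{0,+,%step}<L1>,+,1}<L2>, provided every operand stays invariant in
// the loop it is now attached to.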
3266 return getOrCreateAddRecExpr(Operands, L, Flags); 3267 } 3268 3269 const SCEV * 3270 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3271 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3272 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3273 // getSCEV(Base)->getType() has the same address space as Base->getType() 3274 // because SCEV::getType() preserves the address space. 3275 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType()); 3276 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3277 // instruction to its SCEV, because the Instruction may be guarded by control 3278 // flow and the no-overflow bits may not be valid for the expression in any 3279 // context. This can be fixed similarly to how these flags are handled for 3280 // adds. 3281 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3282 : SCEV::FlagAnyWrap; 3283 3284 const SCEV *TotalOffset = getZero(IntIdxTy); 3285 Type *CurTy = GEP->getType(); 3286 bool FirstIter = true; 3287 for (const SCEV *IndexExpr : IndexExprs) { 3288 // Compute the (potentially symbolic) offset in bytes for this index. 3289 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3290 // For a struct, add the member offset. 3291 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3292 unsigned FieldNo = Index->getZExtValue(); 3293 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3294 3295 // Add the field offset to the running total offset. 3296 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3297 3298 // Update CurTy to the type of the field at Index. 3299 CurTy = STy->getTypeAtIndex(Index); 3300 } else { 3301 // Update CurTy to its element type. 3302 if (FirstIter) { 3303 assert(isa<PointerType>(CurTy) && 3304 "The first index of a GEP indexes a pointer"); 3305 CurTy = GEP->getSourceElementType(); 3306 FirstIter = false; 3307 } else { 3308 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3309 } 3310 // For an array, add the element offset, explicitly scaled. 3311 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3312 // Getelementptr indices are signed. 3313 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3314 3315 // Multiply the index by the element size to compute the element offset. 3316 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3317 3318 // Add the element offset to the running total offset. 3319 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3320 } 3321 } 3322 3323 // Add the total offset from all the GEP indices to the base. 3324 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3325 } 3326 3327 std::tuple<SCEV *, FoldingSetNodeID, void *> 3328 ScalarEvolution::findExistingSCEVInCache(int SCEVType, 3329 ArrayRef<const SCEV *> Ops) { 3330 FoldingSetNodeID ID; 3331 void *IP = nullptr; 3332 ID.AddInteger(SCEVType); 3333 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3334 ID.AddPointer(Ops[i]); 3335 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3336 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3337 } 3338 3339 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3340 SCEV::NoWrapFlags Flags = IsNSW ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3341 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3342 } 3343 3344 const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) { 3345 Type *Ty = Op->getType(); 3346 return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty)); 3347 } 3348 3349 const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind, 3350 SmallVectorImpl<const SCEV *> &Ops) { 3351 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3352 if (Ops.size() == 1) return Ops[0]; 3353 #ifndef NDEBUG 3354 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3355 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3356 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3357 "Operand types don't match!"); 3358 #endif 3359 3360 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3361 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3362 3363 // Sort by complexity; this groups all similar expression types together. 3364 GroupByComplexity(Ops, &LI, DT); 3365 3366 // Check if we have created the same expression before. 3367 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3368 return S; 3369 } 3370 3371 // If there are any constants, fold them together. 3372 unsigned Idx = 0; 3373 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3374 ++Idx; 3375 assert(Idx < Ops.size()); 3376 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3377 if (Kind == scSMaxExpr) 3378 return APIntOps::smax(LHS, RHS); 3379 else if (Kind == scSMinExpr) 3380 return APIntOps::smin(LHS, RHS); 3381 else if (Kind == scUMaxExpr) 3382 return APIntOps::umax(LHS, RHS); 3383 else if (Kind == scUMinExpr) 3384 return APIntOps::umin(LHS, RHS); 3385 llvm_unreachable("Unknown SCEV min/max opcode"); 3386 }; 3387 3388 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3389 // We found two constants, fold them together! 3390 ConstantInt *Fold = ConstantInt::get( 3391 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3392 Ops[0] = getConstant(Fold); 3393 Ops.erase(Ops.begin()+1); // Erase the folded element 3394 if (Ops.size() == 1) return Ops[0]; 3395 LHSC = cast<SCEVConstant>(Ops[0]); 3396 } 3397 3398 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3399 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3400 3401 if (IsMax ? IsMinV : IsMaxV) { 3402 // If we are left with a constant minimum(/maximum)-int, strip it off. 3403 Ops.erase(Ops.begin()); 3404 --Idx; 3405 } else if (IsMax ? IsMaxV : IsMinV) { 3406 // If we have a max(/min) with a constant maximum(/minimum)-int, 3407 // it will always be the extremum. 3408 return LHSC; 3409 } 3410 3411 if (Ops.size() == 1) return Ops[0]; 3412 } 3413 3414 // Find the first operation of the same kind. 3415 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3416 ++Idx; 3417 3418 // Check to see if one of the operands is of the same kind. If so, expand its 3419 // operands onto our operand list, and recurse to simplify. 3420 if (Idx < Ops.size()) { 3421 bool DeletedAny = false; 3422 while (Ops[Idx]->getSCEVType() == Kind) { 3423 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3424 Ops.erase(Ops.begin()+Idx); 3425 Ops.append(SMME->op_begin(), SMME->op_end()); 3426 DeletedAny = true; 3427 } 3428 3429 if (DeletedAny) 3430 return getMinMaxExpr(Kind, Ops); 3431 } 3432 3433 // Okay, check to see if the same value occurs in the operand list twice. If 3434 // so, delete one. Since we sorted the list, these values are required to 3435 // be adjacent.
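// For example (editor's note): umax(%a, %a, %b) drops the duplicate and
// becomes umax(%a, %b), and umax(256, zext i8 %v to i32) folds to 256
// outright, since a zero-extended i8 is at most 255 and the ordering is
// provable without recursion.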
3436 llvm::CmpInst::Predicate GEPred = 3437 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3438 llvm::CmpInst::Predicate LEPred = 3439 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3440 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3441 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3442 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3443 if (Ops[i] == Ops[i + 1] || 3444 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3445 // X op Y op Y --> X op Y 3446 // X op Y --> X, if we know X, Y are ordered appropriately 3447 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3448 --i; 3449 --e; 3450 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3451 Ops[i + 1])) { 3452 // X op Y --> Y, if we know X, Y are ordered appropriately 3453 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3454 --i; 3455 --e; 3456 } 3457 } 3458 3459 if (Ops.size() == 1) return Ops[0]; 3460 3461 assert(!Ops.empty() && "Reduced min/max down to nothing!"); 3462 3463 // Okay, it looks like we really DO need an expr. Check to see if we 3464 // already have one, otherwise create a new one. 3465 const SCEV *ExistingSCEV; 3466 FoldingSetNodeID ID; 3467 void *IP; 3468 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); 3469 if (ExistingSCEV) 3470 return ExistingSCEV; 3471 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3472 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3473 SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr( 3474 ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size()); 3475 3476 UniqueSCEVs.InsertNode(S, IP); 3477 addToLoopUseLists(S); 3478 return S; 3479 } 3480 3481 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3482 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3483 return getSMaxExpr(Ops); 3484 } 3485 3486 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3487 return getMinMaxExpr(scSMaxExpr, Ops); 3488 } 3489 3490 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3491 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3492 return getUMaxExpr(Ops); 3493 } 3494 3495 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3496 return getMinMaxExpr(scUMaxExpr, Ops); 3497 } 3498 3499 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3500 const SCEV *RHS) { 3501 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3502 return getSMinExpr(Ops); 3503 } 3504 3505 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3506 return getMinMaxExpr(scSMinExpr, Ops); 3507 } 3508 3509 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3510 const SCEV *RHS) { 3511 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3512 return getUMinExpr(Ops); 3513 } 3514 3515 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3516 return getMinMaxExpr(scUMinExpr, Ops); 3517 } 3518 3519 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3520 // We can bypass creating a target-independent 3521 // constant expression and then folding it back into a ConstantInt. 3522 // This is just a compile-time optimization.
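// A fixed-size example (editor's note): with i64 IntTy and a struct
// { i32, i64 } under a typical DataLayout, getTypeAllocSize reports 16
// (4 bytes of i32, 4 of padding, 8 of i64), so the non-scalable path
// below just returns the SCEV constant 16. Scalable vectors have no
// compile-time size, hence the ptrtoint(gep null, 1) route instead.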
3523 if (isa<ScalableVectorType>(AllocTy)) { 3524 Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo()); 3525 Constant *One = ConstantInt::get(IntTy, 1); 3526 Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One); 3527 return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy)); 3528 } 3529 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3530 } 3531 3532 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3533 StructType *STy, 3534 unsigned FieldNo) { 3535 // We can bypass creating a target-independent 3536 // constant expression and then folding it back into a ConstantInt. 3537 // This is just a compile-time optimization. 3538 return getConstant( 3539 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3540 } 3541 3542 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3543 // Don't attempt to do anything other than create a SCEVUnknown object 3544 // here. createSCEV only calls getUnknown after checking for all other 3545 // interesting possibilities, and any other code that calls getUnknown 3546 // is doing so in order to hide a value from SCEV canonicalization. 3547 3548 FoldingSetNodeID ID; 3549 ID.AddInteger(scUnknown); 3550 ID.AddPointer(V); 3551 void *IP = nullptr; 3552 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3553 assert(cast<SCEVUnknown>(S)->getValue() == V && 3554 "Stale SCEVUnknown in uniquing map!"); 3555 return S; 3556 } 3557 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3558 FirstUnknown); 3559 FirstUnknown = cast<SCEVUnknown>(S); 3560 UniqueSCEVs.InsertNode(S, IP); 3561 return S; 3562 } 3563 3564 //===----------------------------------------------------------------------===// 3565 // Basic SCEV Analysis and PHI Idiom Recognition Code 3566 // 3567 3568 /// Test if values of the given type are analyzable within the SCEV 3569 /// framework. This primarily includes integer types, and it can optionally 3570 /// include pointer types if the ScalarEvolution class has access to 3571 /// target-specific information. 3572 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3573 // Integers and pointers are always SCEVable. 3574 return Ty->isIntOrPtrTy(); 3575 } 3576 3577 /// Return the size in bits of the specified type, for which isSCEVable must 3578 /// return true. 3579 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3580 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3581 if (Ty->isPointerTy()) 3582 return getDataLayout().getIndexTypeSizeInBits(Ty); 3583 return getDataLayout().getTypeSizeInBits(Ty); 3584 } 3585 3586 /// Return a type with the same bitwidth as the given type and which represents 3587 /// how SCEV will treat the given type, for which isSCEVable must return 3588 /// true. For pointer types, this is the pointer index sized integer type. 3589 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3590 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3591 3592 if (Ty->isIntegerTy()) 3593 return Ty; 3594 3595 // The only other supported type is pointer. 3596 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3597 return getDataLayout().getIndexType(Ty); 3598 } 3599 3600 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3601 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3602 } 3603 3604 const SCEV *ScalarEvolution::getCouldNotCompute() { 3605 return CouldNotCompute.get(); 3606 } 3607 3608 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3609 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3610 auto *SU = dyn_cast<SCEVUnknown>(S); 3611 return SU && SU->getValue() == nullptr; 3612 }); 3613 3614 return !ContainsNulls; 3615 } 3616 3617 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3618 HasRecMapType::iterator I = HasRecMap.find(S); 3619 if (I != HasRecMap.end()) 3620 return I->second; 3621 3622 bool FoundAddRec = 3623 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3624 HasRecMap.insert({S, FoundAddRec}); 3625 return FoundAddRec; 3626 } 3627 3628 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3629 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3630 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3631 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3632 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3633 if (!Add) 3634 return {S, nullptr}; 3635 3636 if (Add->getNumOperands() != 2) 3637 return {S, nullptr}; 3638 3639 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3640 if (!ConstOp) 3641 return {S, nullptr}; 3642 3643 return {Add->getOperand(1), ConstOp->getValue()}; 3644 } 3645 3646 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3647 /// by the value and offset from any ValueOffsetPair in the set. 3648 SetVector<ScalarEvolution::ValueOffsetPair> * 3649 ScalarEvolution::getSCEVValues(const SCEV *S) { 3650 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3651 if (SI == ExprValueMap.end()) 3652 return nullptr; 3653 #ifndef NDEBUG 3654 if (VerifySCEVMap) { 3655 // Check there is no dangling Value in the set returned. 3656 for (const auto &VE : SI->second) 3657 assert(ValueExprMap.count(VE.first)); 3658 } 3659 #endif 3660 return &SI->second; 3661 } 3662 3663 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3664 /// cannot be used separately. eraseValueFromMap should be used to remove 3665 /// V from ValueExprMap and ExprValueMap at the same time. 3666 void ScalarEvolution::eraseValueFromMap(Value *V) { 3667 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3668 if (I != ValueExprMap.end()) { 3669 const SCEV *S = I->second; 3670 // Remove {V, 0} from the set of ExprValueMap[S] 3671 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3672 SV->remove({V, nullptr}); 3673 3674 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3675 const SCEV *Stripped; 3676 ConstantInt *Offset; 3677 std::tie(Stripped, Offset) = splitAddExpr(S); 3678 if (Offset != nullptr) { 3679 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3680 SV->remove({V, Offset}); 3681 } 3682 ValueExprMap.erase(V); 3683 } 3684 } 3685 3686 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3687 /// TODO: Ideally we would check for poison recursively, 3688 /// but this is better than nothing.
3689 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { 3690 if (auto *I = dyn_cast<Instruction>(V)) { 3691 if (isa<OverflowingBinaryOperator>(I)) { 3692 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { 3693 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) 3694 return true; 3695 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) 3696 return true; 3697 } 3698 } else if (isa<PossiblyExactOperator>(I) && I->isExact()) 3699 return true; 3700 } 3701 return false; 3702 } 3703 3704 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3705 /// create a new one. 3706 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3707 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3708 3709 const SCEV *S = getExistingSCEV(V); 3710 if (S == nullptr) { 3711 S = createSCEV(V); 3712 // During PHI resolution, it is possible to create two SCEVs for the same 3713 // V, so we need to double-check whether V->S was inserted into 3714 // ValueExprMap before inserting S->{V, 0} into ExprValueMap. 3715 std::pair<ValueExprMapType::iterator, bool> Pair = 3716 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3717 if (Pair.second && !SCEVLostPoisonFlags(S, V)) { 3718 ExprValueMap[S].insert({V, nullptr}); 3719 3720 // If S == Stripped + Offset, add Stripped -> {V, Offset} into 3721 // ExprValueMap. 3722 const SCEV *Stripped = S; 3723 ConstantInt *Offset = nullptr; 3724 std::tie(Stripped, Offset) = splitAddExpr(S); 3725 // If Stripped is a SCEVUnknown, don't bother to save 3726 // Stripped -> {V, offset}. It doesn't simplify and sometimes even 3727 // increases the complexity of the expansion code. 3728 // If V is a GetElementPtrInst, don't save Stripped -> {V, offset} 3729 // because it may generate add/sub instead of GEP in SCEV expansion.
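// Illustration (editor's sketch): for %a = add i32 %x, 5 the SCEV is
// (5 + %x); splitAddExpr yields Stripped = %x and Offset = 5, and the
// map entry %x -> {%a, 5} added below lets the expander materialize the
// expression for %x as "%a - 5" when %a is already available.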
3730 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3731 !isa<GetElementPtrInst>(V)) 3732 ExprValueMap[Stripped].insert({V, Offset}); 3733 } 3734 } 3735 return S; 3736 } 3737 3738 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3739 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3740 3741 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3742 if (I != ValueExprMap.end()) { 3743 const SCEV *S = I->second; 3744 if (checkValidity(S)) 3745 return S; 3746 eraseValueFromMap(V); 3747 forgetMemoizedResults(S); 3748 } 3749 return nullptr; 3750 } 3751 3752 /// Return a SCEV corresponding to -V = -1*V 3753 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3754 SCEV::NoWrapFlags Flags) { 3755 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3756 return getConstant( 3757 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3758 3759 Type *Ty = V->getType(); 3760 Ty = getEffectiveSCEVType(Ty); 3761 return getMulExpr(V, getMinusOne(Ty), Flags); 3762 } 3763 3764 /// If Expr computes ~A, return A; otherwise return nullptr. 3765 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3766 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3767 if (!Add || Add->getNumOperands() != 2 || 3768 !Add->getOperand(0)->isAllOnesValue()) 3769 return nullptr; 3770 3771 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3772 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3773 !AddRHS->getOperand(0)->isAllOnesValue()) 3774 return nullptr; 3775 3776 return AddRHS->getOperand(1); 3777 } 3778 3779 /// Return a SCEV corresponding to ~V = -1-V 3780 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3781 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3782 return getConstant( 3783 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3784 3785 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3786 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3787 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3788 SmallVector<const SCEV *, 2> MatchedOperands; 3789 for (const SCEV *Operand : MME->operands()) { 3790 const SCEV *Matched = MatchNotExpr(Operand); 3791 if (!Matched) 3792 return (const SCEV *)nullptr; 3793 MatchedOperands.push_back(Matched); 3794 } 3795 return getMinMaxExpr( 3796 SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())), 3797 MatchedOperands); 3798 }; 3799 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 3800 return Replaced; 3801 } 3802 3803 Type *Ty = V->getType(); 3804 Ty = getEffectiveSCEVType(Ty); 3805 return getMinusSCEV(getMinusOne(Ty), V); 3806 } 3807 3808 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 3809 SCEV::NoWrapFlags Flags, 3810 unsigned Depth) { 3811 // Fast path: X - X --> 0. 3812 if (LHS == RHS) 3813 return getZero(LHS->getType()); 3814 3815 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 3816 // means that we cannot make much use of NUW. 3817 auto AddFlags = SCEV::FlagAnyWrap; 3818 const bool RHSIsNotMinSigned = 3819 !getSignedRangeMin(RHS).isMinSignedValue(); 3820 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 3821 // Let M be the minimum representable signed value. Then (-1)*RHS 3822 // signed-wraps if and only if RHS is M. That can happen even for 3823 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 3824 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 3825 // (-1)*RHS, we need to prove that RHS != M.
3826 // 3827 // If LHS is non-negative and we know that LHS - RHS does not 3828 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 3829 // either by proving that RHS > M or that LHS >= 0. 3830 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 3831 AddFlags = SCEV::FlagNSW; 3832 } 3833 } 3834 3835 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 3836 // RHS is NSW and LHS >= 0. 3837 // 3838 // The difficulty here is that the NSW flag may have been proven 3839 // relative to a loop that is to be found in a recurrence in LHS and 3840 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 3841 // larger scope than intended. 3842 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3843 3844 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); 3845 } 3846 3847 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, 3848 unsigned Depth) { 3849 Type *SrcTy = V->getType(); 3850 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3851 "Cannot truncate or zero extend with non-integer arguments!"); 3852 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3853 return V; // No conversion 3854 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3855 return getTruncateExpr(V, Ty, Depth); 3856 return getZeroExtendExpr(V, Ty, Depth); 3857 } 3858 3859 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, 3860 unsigned Depth) { 3861 Type *SrcTy = V->getType(); 3862 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3863 "Cannot truncate or sign extend with non-integer arguments!"); 3864 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3865 return V; // No conversion 3866 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 3867 return getTruncateExpr(V, Ty, Depth); 3868 return getSignExtendExpr(V, Ty, Depth); 3869 } 3870 3871 const SCEV * 3872 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 3873 Type *SrcTy = V->getType(); 3874 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3875 "Cannot noop or zero extend with non-integer arguments!"); 3876 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3877 "getNoopOrZeroExtend cannot truncate!"); 3878 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3879 return V; // No conversion 3880 return getZeroExtendExpr(V, Ty); 3881 } 3882 3883 const SCEV * 3884 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 3885 Type *SrcTy = V->getType(); 3886 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3887 "Cannot noop or sign extend with non-integer arguments!"); 3888 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3889 "getNoopOrSignExtend cannot truncate!"); 3890 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3891 return V; // No conversion 3892 return getSignExtendExpr(V, Ty); 3893 } 3894 3895 const SCEV * 3896 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 3897 Type *SrcTy = V->getType(); 3898 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3899 "Cannot noop or any extend with non-integer arguments!"); 3900 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 3901 "getNoopOrAnyExtend cannot truncate!"); 3902 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3903 return V; // No conversion 3904 return getAnyExtendExpr(V, Ty); 3905 } 3906 3907 const SCEV * 3908 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 3909 Type *SrcTy = V->getType(); 3910 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 3911 "Cannot
truncate or noop with non-integer arguments!"); 3912 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 3913 "getTruncateOrNoop cannot extend!"); 3914 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 3915 return V; // No conversion 3916 return getTruncateExpr(V, Ty); 3917 } 3918 3919 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 3920 const SCEV *RHS) { 3921 const SCEV *PromotedLHS = LHS; 3922 const SCEV *PromotedRHS = RHS; 3923 3924 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 3925 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 3926 else 3927 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 3928 3929 return getUMaxExpr(PromotedLHS, PromotedRHS); 3930 } 3931 3932 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 3933 const SCEV *RHS) { 3934 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3935 return getUMinFromMismatchedTypes(Ops); 3936 } 3937 3938 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 3939 SmallVectorImpl<const SCEV *> &Ops) { 3940 assert(!Ops.empty() && "At least one operand is required!"); 3941 // Trivial case. 3942 if (Ops.size() == 1) 3943 return Ops[0]; 3944 3945 // Find the max type first. 3946 Type *MaxType = nullptr; 3947 for (auto *S : Ops) 3948 if (MaxType) 3949 MaxType = getWiderType(MaxType, S->getType()); 3950 else 3951 MaxType = S->getType(); 3952 assert(MaxType && "Failed to find maximum type!"); 3953 3954 // Extend all ops to max type. 3955 SmallVector<const SCEV *, 2> PromotedOps; 3956 for (auto *S : Ops) 3957 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 3958 3959 // Generate umin. 3960 return getUMinExpr(PromotedOps); 3961 } 3962 3963 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 3964 // A pointer operand may evaluate to a nonpointer expression, such as null. 3965 if (!V->getType()->isPointerTy()) 3966 return V; 3967 3968 while (true) { 3969 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) { 3970 V = Cast->getOperand(); 3971 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 3972 const SCEV *PtrOp = nullptr; 3973 for (const SCEV *NAryOp : NAry->operands()) { 3974 if (NAryOp->getType()->isPointerTy()) { 3975 // Cannot find the base of an expression with multiple pointer ops. 3976 if (PtrOp) 3977 return V; 3978 PtrOp = NAryOp; 3979 } 3980 } 3981 if (!PtrOp) // All operands were non-pointer. 3982 return V; 3983 V = PtrOp; 3984 } else // Not something we can look further into. 3985 return V; 3986 } 3987 } 3988 3989 /// Push users of the given Instruction onto the given Worklist. 3990 static void 3991 PushDefUseChildren(Instruction *I, 3992 SmallVectorImpl<Instruction *> &Worklist) { 3993 // Push the def-use children onto the Worklist stack. 3994 for (User *U : I->users()) 3995 Worklist.push_back(cast<Instruction>(U)); 3996 } 3997 3998 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 3999 SmallVector<Instruction *, 16> Worklist; 4000 PushDefUseChildren(PN, Worklist); 4001 4002 SmallPtrSet<Instruction *, 8> Visited; 4003 Visited.insert(PN); 4004 while (!Worklist.empty()) { 4005 Instruction *I = Worklist.pop_back_val(); 4006 if (!Visited.insert(I).second) 4007 continue; 4008 4009 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4010 if (It != ValueExprMap.end()) { 4011 const SCEV *Old = It->second; 4012 4013 // Short-circuit the def-use traversal if the symbolic name 4014 // ceases to appear in expressions.
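// Context (editor's note): while createNodeForPHI analyzes %iv, the phi is
// temporarily mapped to a symbolic SCEVUnknown, so a user such as
// %iv.next = add i32 %iv, 1 caches (1 + %iv-placeholder). Once the real
// addrec is known, every cached expression still mentioning the
// placeholder must be erased here and recomputed on demand.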
4015 if (Old != SymName && !hasOperand(Old, SymName)) 4016 continue; 4017 4018 // SCEVUnknown for a PHI either means that it has an unrecognized 4019 // structure, it's a PHI that's in the process of being computed 4020 // by createNodeForPHI, or it's a single-value PHI. In the first case, 4021 // additional loop trip count information isn't going to change anything. 4022 // In the second case, createNodeForPHI will perform the necessary 4023 // updates on its own when it gets to that point. In the third, we do 4024 // want to forget the SCEVUnknown. 4025 if (!isa<PHINode>(I) || 4026 !isa<SCEVUnknown>(Old) || 4027 (I != PN && Old == SymName)) { 4028 eraseValueFromMap(It->first); 4029 forgetMemoizedResults(Old); 4030 } 4031 } 4032 4033 PushDefUseChildren(I, Worklist); 4034 } 4035 } 4036 4037 namespace { 4038 4039 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4040 /// expression if its loop is L. If the loop is not L, use the AddRec itself 4041 /// when IgnoreOtherLoops is true; otherwise the rewrite cannot be done. 4042 /// The rewrite also cannot be done if the SCEV contains a SCEVUnknown 4043 /// that is not invariant in L. 4044 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4045 public: 4046 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4047 bool IgnoreOtherLoops = true) { 4048 SCEVInitRewriter Rewriter(L, SE); 4049 const SCEV *Result = Rewriter.visit(S); 4050 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4051 return SE.getCouldNotCompute(); 4052 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4053 ? SE.getCouldNotCompute() 4054 : Result; 4055 } 4056 4057 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4058 if (!SE.isLoopInvariant(Expr, L)) 4059 SeenLoopVariantSCEVUnknown = true; 4060 return Expr; 4061 } 4062 4063 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4064 // Only rewrite AddRecExprs for this loop. 4065 if (Expr->getLoop() == L) 4066 return Expr->getStart(); 4067 SeenOtherLoops = true; 4068 return Expr; 4069 } 4070 4071 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4072 4073 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4074 4075 private: 4076 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4077 : SCEVRewriteVisitor(SE), L(L) {} 4078 4079 const Loop *L; 4080 bool SeenLoopVariantSCEVUnknown = false; 4081 bool SeenOtherLoops = false; 4082 }; 4083 4084 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its 4085 /// post-increment expression if its loop is L; otherwise use the AddRec 4086 /// itself. 4087 /// The rewrite cannot be done if the SCEV contains a SCEVUnknown that is not invariant in L. 4088 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4089 public: 4090 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4091 SCEVPostIncRewriter Rewriter(L, SE); 4092 const SCEV *Result = Rewriter.visit(S); 4093 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4094 ? SE.getCouldNotCompute() 4095 : Result; 4096 } 4097 4098 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4099 if (!SE.isLoopInvariant(Expr, L)) 4100 SeenLoopVariantSCEVUnknown = true; 4101 return Expr; 4102 } 4103 4104 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4105 // Only rewrite AddRecExprs for this loop.
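// For instance (editor's sketch): rewriting {0,+,2}<L> below yields its
// post-increment form {2,+,2}<L>, the value of the recurrence after the
// iteration's increment rather than at the top of the iteration.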
4106 if (Expr->getLoop() == L) 4107 return Expr->getPostIncExpr(SE); 4108 SeenOtherLoops = true; 4109 return Expr; 4110 } 4111 4112 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4113 4114 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4115 4116 private: 4117 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4118 : SCEVRewriteVisitor(SE), L(L) {} 4119 4120 const Loop *L; 4121 bool SeenLoopVariantSCEVUnknown = false; 4122 bool SeenOtherLoops = false; 4123 }; 4124 4125 /// This class evaluates the compare condition by matching it against the 4126 /// condition of the loop latch. If there is a match, we assume a true value 4127 /// for the condition while building SCEV nodes. 4128 class SCEVBackedgeConditionFolder 4129 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4130 public: 4131 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4132 ScalarEvolution &SE) { 4133 bool IsPosBECond = false; 4134 Value *BECond = nullptr; 4135 if (BasicBlock *Latch = L->getLoopLatch()) { 4136 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4137 if (BI && BI->isConditional()) { 4138 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4139 "Both outgoing branches should not target the same header!"); 4140 BECond = BI->getCondition(); 4141 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4142 } else { 4143 return S; 4144 } 4145 } 4146 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4147 return Rewriter.visit(S); 4148 } 4149 4150 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4151 const SCEV *Result = Expr; 4152 bool InvariantF = SE.isLoopInvariant(Expr, L); 4153 4154 if (!InvariantF) { 4155 Instruction *I = cast<Instruction>(Expr->getValue()); 4156 switch (I->getOpcode()) { 4157 case Instruction::Select: { 4158 SelectInst *SI = cast<SelectInst>(I); 4159 Optional<const SCEV *> Res = 4160 compareWithBackedgeCondition(SI->getCondition()); 4161 if (Res.hasValue()) { 4162 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4163 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4164 } 4165 break; 4166 } 4167 default: { 4168 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4169 if (Res.hasValue()) 4170 Result = Res.getValue(); 4171 break; 4172 } 4173 } 4174 } 4175 return Result; 4176 } 4177 4178 private: 4179 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4180 bool IsPosBECond, ScalarEvolution &SE) 4181 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4182 IsPositiveBECond(IsPosBECond) {} 4183 4184 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4185 4186 const Loop *L; 4187 /// Loop backedge condition. 4188 Value *BackedgeCond = nullptr; 4189 /// Set to true if the backedge is taken on the positive branch condition. 4190 bool IsPositiveBECond; 4191 }; 4192 4193 Optional<const SCEV *> 4194 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4195 4196 // If the value matches the backedge condition of the loop latch, return 4197 // a constant evolution node reflecting the branch polarity on which the 4198 // backedge is taken. 4199 if (BackedgeCond == IC) 4200 return IsPositiveBECond ?
SE.getOne(Type::getInt1Ty(SE.getContext())) 4201 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4202 return None; 4203 } 4204 4205 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4206 public: 4207 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4208 ScalarEvolution &SE) { 4209 SCEVShiftRewriter Rewriter(L, SE); 4210 const SCEV *Result = Rewriter.visit(S); 4211 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4212 } 4213 4214 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4215 // Only loop-invariant SCEVUnknowns are allowed. 4216 if (!SE.isLoopInvariant(Expr, L)) 4217 Valid = false; 4218 return Expr; 4219 } 4220 4221 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4222 if (Expr->getLoop() == L && Expr->isAffine()) 4223 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4224 Valid = false; 4225 return Expr; 4226 } 4227 4228 bool isValid() { return Valid; } 4229 4230 private: 4231 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4232 : SCEVRewriteVisitor(SE), L(L) {} 4233 4234 const Loop *L; 4235 bool Valid = true; 4236 }; 4237 4238 } // end anonymous namespace 4239 4240 SCEV::NoWrapFlags 4241 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4242 if (!AR->isAffine()) 4243 return SCEV::FlagAnyWrap; 4244 4245 using OBO = OverflowingBinaryOperator; 4246 4247 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4248 4249 if (!AR->hasNoSignedWrap()) { 4250 ConstantRange AddRecRange = getSignedRange(AR); 4251 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4252 4253 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4254 Instruction::Add, IncRange, OBO::NoSignedWrap); 4255 if (NSWRegion.contains(AddRecRange)) 4256 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4257 } 4258 4259 if (!AR->hasNoUnsignedWrap()) { 4260 ConstantRange AddRecRange = getUnsignedRange(AR); 4261 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4262 4263 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4264 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4265 if (NUWRegion.contains(AddRecRange)) 4266 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4267 } 4268 4269 return Result; 4270 } 4271 4272 namespace { 4273 4274 /// Represents an abstract binary operation. This may exist as a 4275 /// normal instruction or constant expression, or may have been 4276 /// derived from an expression tree. 4277 struct BinaryOp { 4278 unsigned Opcode; 4279 Value *LHS; 4280 Value *RHS; 4281 bool IsNSW = false; 4282 bool IsNUW = false; 4283 bool IsExact = false; 4284 4285 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4286 /// constant expression. 4287 Operator *Op = nullptr; 4288 4289 explicit BinaryOp(Operator *Op) 4290 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4291 Op(Op) { 4292 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4293 IsNSW = OBO->hasNoSignedWrap(); 4294 IsNUW = OBO->hasNoUnsignedWrap(); 4295 } 4296 if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op)) 4297 IsExact = PEO->isExact(); 4298 } 4299 4300 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4301 bool IsNUW = false, bool IsExact = false) 4302 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW), 4303 IsExact(IsExact) {} 4304 }; 4305 4306 } // end anonymous namespace 4307 4308 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4309 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4310 auto *Op = dyn_cast<Operator>(V); 4311 if (!Op) 4312 return None; 4313 4314 // Implementation detail: all the cleverness here should happen without 4315 // creating new SCEV expressions -- our caller knows tricks to avoid creating 4316 // SCEV expressions when possible, and we should not break that. 4317 4318 switch (Op->getOpcode()) { 4319 case Instruction::Add: 4320 case Instruction::Sub: 4321 case Instruction::Mul: 4322 case Instruction::UDiv: 4323 case Instruction::URem: 4324 case Instruction::And: 4325 case Instruction::Or: 4326 case Instruction::AShr: 4327 case Instruction::Shl: 4328 return BinaryOp(Op); 4329 4330 case Instruction::Xor: 4331 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4332 // If the RHS of the xor is a signmask, then this is just an add. 4333 // Instcombine turns add of signmask into xor as a strength reduction step. 4334 if (RHSC->getValue().isSignMask()) 4335 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4336 return BinaryOp(Op); 4337 4338 case Instruction::LShr: 4339 // Turn logical shift right of a constant into an unsigned divide. 4340 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4341 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4342 4343 // If the shift count is not less than the bitwidth, the result of 4344 // the shift is undefined. Don't try to analyze it, because the 4345 // resolution chosen here may differ from the resolution chosen in 4346 // other parts of the compiler. 4347 if (SA->getValue().ult(BitWidth)) { 4348 Constant *X = 4349 ConstantInt::get(SA->getContext(), 4350 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4351 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4352 } 4353 } 4354 return BinaryOp(Op); 4355 4356 case Instruction::ExtractValue: { 4357 auto *EVI = cast<ExtractValueInst>(Op); 4358 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4359 break; 4360 4361 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4362 if (!WO) 4363 break; 4364 4365 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4366 bool Signed = WO->isSigned(); 4367 // TODO: Should add nuw/nsw flags for mul as well. 4368 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 4369 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 4370 4371 // Now that we know that all uses of the arithmetic-result component of 4372 // CI are guarded by the overflow check, we can go ahead and pretend 4373 // that the arithmetic is non-overflowing. 4374 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4375 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4376 } 4377 4378 default: 4379 break; 4380 } 4381 4382 // Recognize intrinsic loop.decrement.reg, and as this has exactly the same 4383 // semantics as a Sub, return a binary sub expression. 4384 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4385 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4386 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4387 4388 return None; 4389 } 4390 4391 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4392 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4393 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4394 /// way.
This function checks if \p Op, an operand of this SCEVAddExpr, 4395 /// follows one of the following patterns: 4396 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4397 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4398 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4399 /// we return the type of the truncation operation, and indicate whether the 4400 /// truncated type should be treated as signed/unsigned by setting 4401 /// \p Signed to true/false, respectively. 4402 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4403 bool &Signed, ScalarEvolution &SE) { 4404 // The case where Op == SymbolicPHI (that is, with no type conversions on 4405 // the way) is handled by the regular add recurrence creating logic and 4406 // would have already been triggered in createAddRecForPHI. Reaching it here 4407 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4408 // because one of the other operands of the SCEVAddExpr updating this PHI is 4409 // not invariant). 4410 // 4411 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4412 // this case predicates that allow us to prove that Op == SymbolicPHI will 4413 // be added. 4414 if (Op == SymbolicPHI) 4415 return nullptr; 4416 4417 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4418 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4419 if (SourceBits != NewBits) 4420 return nullptr; 4421 4422 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4423 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4424 if (!SExt && !ZExt) 4425 return nullptr; 4426 const SCEVTruncateExpr *Trunc = 4427 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4428 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4429 if (!Trunc) 4430 return nullptr; 4431 const SCEV *X = Trunc->getOperand(); 4432 if (X != SymbolicPHI) 4433 return nullptr; 4434 Signed = SExt != nullptr; 4435 return Trunc->getType(); 4436 } 4437 4438 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4439 if (!PN->getType()->isIntegerTy()) 4440 return nullptr; 4441 const Loop *L = LI.getLoopFor(PN->getParent()); 4442 if (!L || L->getHeader() != PN->getParent()) 4443 return nullptr; 4444 return L; 4445 } 4446 4447 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4448 // computation that updates the phi follows the following pattern: 4449 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4450 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4451 // If so, try to see if it can be rewritten as an AddRecExpr under some 4452 // Predicates. If successful, return them as a pair. Also cache the results 4453 // of the analysis. 4454 // 4455 // Example usage scenario: 4456 // Say the Rewriter is called for the following SCEV: 4457 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4458 // where: 4459 // %X = phi i64 (%Start, %BEValue) 4460 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4461 // and call this function with %SymbolicPHI = %X. 
4462 //
4463 // The analysis will find that the value coming around the backedge has
4464 // the following SCEV:
4465 //         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4466 // Upon concluding that this matches the desired pattern, the function
4467 // will return the pair {NewAddRec, SmallPredsVec} where:
4468 //         NewAddRec = {%Start,+,%Step}
4469 //         SmallPredsVec = {P1, P2, P3} as follows:
4470 //           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4471 //           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4472 //           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4473 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4474 // under the predicates {P1,P2,P3}.
4475 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4476 //         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4477 //
4478 // TODOs:
4479 //
4480 // 1) Extend the Induction descriptor to also support inductions that involve
4481 //    casts: When needed (namely, when we are called in the context of the
4482 //    vectorizer induction analysis), a Set of cast instructions will be
4483 //    populated by this method, and provided back to isInductionPHI. This is
4484 //    needed to allow the vectorizer to properly record them to be ignored by
4485 //    the cost model and to avoid vectorizing them (otherwise these casts,
4486 //    which are redundant under the runtime overflow checks, will be
4487 //    vectorized, which can be costly).
4488 //
4489 // 2) Support additional induction/PHISCEV patterns: We also want to support
4490 //    inductions where the sext-trunc / zext-trunc operations (partly) occur
4491 //    after the induction update operation (the induction increment):
4492 //
4493 //      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4494 //    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4495 //
4496 //      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4497 //    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4498 //
4499 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4500 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4501 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4502   SmallVector<const SCEVPredicate *, 3> Predicates;
4503
4504   // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4505   // return an AddRec expression under some predicate.
4506
4507   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4508   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4509   assert(L && "Expecting an integer loop header phi");
4510
4511   // The loop may have multiple entrances or multiple exits; we can analyze
4512   // this phi as an addrec if it has a unique entry value and a unique
4513   // backedge value.
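  // For illustration (hypothetical IR), a header phi such as
  //   %x = phi i64 [ %Start, %preheader ], [ %BEValue, %latch ]
  // has the unique entry value %Start and the unique backedge value
  // %BEValue; a phi with two distinct backedge values is rejected below.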
4514 Value *BEValueV = nullptr, *StartValueV = nullptr; 4515 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4516 Value *V = PN->getIncomingValue(i); 4517 if (L->contains(PN->getIncomingBlock(i))) { 4518 if (!BEValueV) { 4519 BEValueV = V; 4520 } else if (BEValueV != V) { 4521 BEValueV = nullptr; 4522 break; 4523 } 4524 } else if (!StartValueV) { 4525 StartValueV = V; 4526 } else if (StartValueV != V) { 4527 StartValueV = nullptr; 4528 break; 4529 } 4530 } 4531 if (!BEValueV || !StartValueV) 4532 return None; 4533 4534 const SCEV *BEValue = getSCEV(BEValueV); 4535 4536 // If the value coming around the backedge is an add with the symbolic 4537 // value we just inserted, possibly with casts that we can ignore under 4538 // an appropriate runtime guard, then we found a simple induction variable! 4539 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4540 if (!Add) 4541 return None; 4542 4543 // If there is a single occurrence of the symbolic value, possibly 4544 // casted, replace it with a recurrence. 4545 unsigned FoundIndex = Add->getNumOperands(); 4546 Type *TruncTy = nullptr; 4547 bool Signed; 4548 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4549 if ((TruncTy = 4550 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4551 if (FoundIndex == e) { 4552 FoundIndex = i; 4553 break; 4554 } 4555 4556 if (FoundIndex == Add->getNumOperands()) 4557 return None; 4558 4559 // Create an add with everything but the specified operand. 4560 SmallVector<const SCEV *, 8> Ops; 4561 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4562 if (i != FoundIndex) 4563 Ops.push_back(Add->getOperand(i)); 4564 const SCEV *Accum = getAddExpr(Ops); 4565 4566 // The runtime checks will not be valid if the step amount is 4567 // varying inside the loop. 4568 if (!isLoopInvariant(Accum, L)) 4569 return None; 4570 4571 // *** Part2: Create the predicates 4572 4573 // Analysis was successful: we have a phi-with-cast pattern for which we 4574 // can return an AddRec expression under the following predicates: 4575 // 4576 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4577 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4578 // P2: An Equal predicate that guarantees that 4579 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4580 // P3: An Equal predicate that guarantees that 4581 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4582 // 4583 // As we next prove, the above predicates guarantee that: 4584 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4585 // 4586 // 4587 // More formally, we want to prove that: 4588 // Expr(i+1) = Start + (i+1) * Accum 4589 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4590 // 4591 // Given that: 4592 // 1) Expr(0) = Start 4593 // 2) Expr(1) = Start + Accum 4594 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4595 // 3) Induction hypothesis (step i): 4596 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4597 // 4598 // Proof: 4599 // Expr(i+1) = 4600 // = Start + (i+1)*Accum 4601 // = (Start + i*Accum) + Accum 4602 // = Expr(i) + Accum 4603 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4604 // :: from step i 4605 // 4606 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4607 // 4608 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4609 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4610 // + Accum :: from P3 4611 // 4612 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4613 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4614 // 4615 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4616 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4617 // 4618 // By induction, the same applies to all iterations 1<=i<n: 4619 // 4620 4621 // Create a truncated addrec for which we will add a no overflow check (P1). 4622 const SCEV *StartVal = getSCEV(StartValueV); 4623 const SCEV *PHISCEV = 4624 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4625 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4626 4627 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4628 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4629 // will be constant. 4630 // 4631 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4632 // add P1. 4633 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4634 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4635 Signed ? SCEVWrapPredicate::IncrementNSSW 4636 : SCEVWrapPredicate::IncrementNUSW; 4637 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4638 Predicates.push_back(AddRecPred); 4639 } 4640 4641 // Create the Equal Predicates P2,P3: 4642 4643 // It is possible that the predicates P2 and/or P3 are computable at 4644 // compile time due to StartVal and/or Accum being constants. 4645 // If either one is, then we can check that now and escape if either P2 4646 // or P3 is false. 4647 4648 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4649 // for each of StartVal and Accum 4650 auto getExtendedExpr = [&](const SCEV *Expr, 4651 bool CreateSignExtend) -> const SCEV * { 4652 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4653 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4654 const SCEV *ExtendedExpr = 4655 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4656                          : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4657     return ExtendedExpr;
4658   };
4659
4660   // Given:
4661   //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4662   //                = getExtendedExpr(Expr)
4663   // Determine whether the predicate P: Expr == ExtendedExpr
4664   // is known to be false at compile time.
4665   auto PredIsKnownFalse = [&](const SCEV *Expr,
4666                               const SCEV *ExtendedExpr) -> bool {
4667     return Expr != ExtendedExpr &&
4668            isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4669   };
4670
4671   const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4672   if (PredIsKnownFalse(StartVal, StartExtended)) {
4673     LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4674     return None;
4675   }
4676
4677   // The Step is always Signed (because the overflow checks are either
4678   // NSSW or NUSW).
4679   const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4680   if (PredIsKnownFalse(Accum, AccumExtended)) {
4681     LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4682     return None;
4683   }
4684
4685   auto AppendPredicate = [&](const SCEV *Expr,
4686                              const SCEV *ExtendedExpr) -> void {
4687     if (Expr != ExtendedExpr &&
4688         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4689       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4690       LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4691       Predicates.push_back(Pred);
4692     }
4693   };
4694
4695   AppendPredicate(StartVal, StartExtended);
4696   AppendPredicate(Accum, AccumExtended);
4697
4698   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4699   // which the casts have been folded away. The caller can rewrite SymbolicPHI
4700   // into NewAR if it will also add the runtime overflow checks specified in
4701   // Predicates.
4702   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4703
4704   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4705       std::make_pair(NewAR, Predicates);
4706   // Remember the result of the analysis for this SCEV at this location.
4707   PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4708   return PredRewrite;
4709 }
4710
4711 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4712 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4713   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4714   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4715   if (!L)
4716     return None;
4717
4718   // Check to see if we already analyzed this PHI.
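  // (A cached entry whose first element is SymbolicPHI itself is the sentinel
  // used below to record a previously failed analysis.)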
4719   auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4720   if (I != PredicatedSCEVRewrites.end()) {
4721     std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4722         I->second;
4723     // Analysis was done before and failed to create an AddRec:
4724     if (Rewrite.first == SymbolicPHI)
4725       return None;
4726     // Analysis was done before and succeeded in creating an AddRec under
4727     // a predicate:
4728     assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4729     assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4730     return Rewrite;
4731   }
4732
4733   Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4734       Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4735
4736   // Record in the cache that the analysis failed.
4737   if (!Rewrite) {
4738     SmallVector<const SCEVPredicate *, 3> Predicates;
4739     PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4740     return None;
4741   }
4742
4743   return Rewrite;
4744 }
4745
4746 // FIXME: This utility is currently required because the Rewriter does not
4747 // rewrite this expression:
4748 // {0, +, (sext ix (trunc iy to ix) to iy)}
4749 // into {0, +, %step},
4750 // even when the following Equal predicate exists:
4751 // "%step == (sext ix (trunc iy to ix) to iy)".
4752 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
4753     const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
4754   if (AR1 == AR2)
4755     return true;
4756
4757   auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
4758     if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4759         !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4760       return false;
4761     return true;
4762   };
4763
4764   if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
4765       !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
4766     return false;
4767   return true;
4768 }
4769
4770 /// A helper function for createAddRecFromPHI to handle simple cases.
4771 ///
4772 /// This function tries to find an AddRec expression for the simplest (yet most
4773 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4774 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4775 /// technique for finding the AddRec expression.
4776 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4777                                                       Value *BEValueV,
4778                                                       Value *StartValueV) {
4779   const Loop *L = LI.getLoopFor(PN->getParent());
4780   assert(L && L->getHeader() == PN->getParent());
4781   assert(BEValueV && StartValueV);
4782
4783   auto BO = MatchBinaryOp(BEValueV, DT);
4784   if (!BO)
4785     return nullptr;
4786
4787   if (BO->Opcode != Instruction::Add)
4788     return nullptr;
4789
4790   const SCEV *Accum = nullptr;
4791   if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4792     Accum = getSCEV(BO->RHS);
4793   else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4794     Accum = getSCEV(BO->LHS);
4795
4796   if (!Accum)
4797     return nullptr;
4798
4799   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4800   if (BO->IsNUW)
4801     Flags = setFlags(Flags, SCEV::FlagNUW);
4802   if (BO->IsNSW)
4803     Flags = setFlags(Flags, SCEV::FlagNSW);
4804
4805   const SCEV *StartVal = getSCEV(StartValueV);
4806   const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4807
4808   ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4809
4810   // We can add Flags to the post-inc expression only if we
4811   // know that it is *undefined behavior* for BEValueV to
4812   // overflow.
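  // (The getAddRecExpr call below is made only for its side effect: it caches
  // the post-incremented expression {Start+Accum,+,Accum} with these flags;
  // the returned value is deliberately discarded.)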
4813   if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4814     if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4815       (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4816
4817   return PHISCEV;
4818 }
4819
4820 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
4821   const Loop *L = LI.getLoopFor(PN->getParent());
4822   if (!L || L->getHeader() != PN->getParent())
4823     return nullptr;
4824
4825   // The loop may have multiple entrances or multiple exits; we can analyze
4826   // this phi as an addrec if it has a unique entry value and a unique
4827   // backedge value.
4828   Value *BEValueV = nullptr, *StartValueV = nullptr;
4829   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
4830     Value *V = PN->getIncomingValue(i);
4831     if (L->contains(PN->getIncomingBlock(i))) {
4832       if (!BEValueV) {
4833         BEValueV = V;
4834       } else if (BEValueV != V) {
4835         BEValueV = nullptr;
4836         break;
4837       }
4838     } else if (!StartValueV) {
4839       StartValueV = V;
4840     } else if (StartValueV != V) {
4841       StartValueV = nullptr;
4842       break;
4843     }
4844   }
4845   if (!BEValueV || !StartValueV)
4846     return nullptr;
4847
4848   assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
4849          "PHI node already processed?");
4850
4851   // First, try to find an AddRec expression without creating a fictitious
4852   // symbolic value for PN.
4853   if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
4854     return S;
4855
4856   // Handle PHI node value symbolically.
4857   const SCEV *SymbolicName = getUnknown(PN);
4858   ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
4859
4860   // Using this symbolic name for the PHI, analyze the value coming around
4861   // the back-edge.
4862   const SCEV *BEValue = getSCEV(BEValueV);
4863
4864   // NOTE: If BEValue is loop invariant, we know that the PHI node just
4865   // has a special value for the first iteration of the loop.
4866
4867   // If the value coming around the backedge is an add with the symbolic
4868   // value we just inserted, then we found a simple induction variable!
4869   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
4870     // If there is a single occurrence of the symbolic value, replace it
4871     // with a recurrence.
4872     unsigned FoundIndex = Add->getNumOperands();
4873     for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4874       if (Add->getOperand(i) == SymbolicName)
4875         if (FoundIndex == e) {
4876           FoundIndex = i;
4877           break;
4878         }
4879
4880     if (FoundIndex != Add->getNumOperands()) {
4881       // Create an add with everything but the specified operand.
4882       SmallVector<const SCEV *, 8> Ops;
4883       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
4884         if (i != FoundIndex)
4885           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
4886                                                              L, *this));
4887       const SCEV *Accum = getAddExpr(Ops);
4888
4889       // This is not a valid addrec if the step amount is varying each
4890       // loop iteration, but is not itself an addrec in this loop.
4891       if (isLoopInvariant(Accum, L) ||
4892           (isa<SCEVAddRecExpr>(Accum) &&
4893            cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
4894         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4895
4896         if (auto BO = MatchBinaryOp(BEValueV, DT)) {
4897           if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
4898             if (BO->IsNUW)
4899               Flags = setFlags(Flags, SCEV::FlagNUW);
4900             if (BO->IsNSW)
4901               Flags = setFlags(Flags, SCEV::FlagNSW);
4902           }
4903         } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
4904           // If the increment is an inbounds GEP, then we know the address
4905           // space cannot be wrapped around. We cannot make any guarantee
4906           // about signed or unsigned overflow because pointers are
4907           // unsigned but we may have a negative index from the base
4908           // pointer. We can guarantee that no unsigned wrap occurs if the
4909           // indices form a positive value.
4910           if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
4911             Flags = setFlags(Flags, SCEV::FlagNW);
4912
4913             const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
4914             if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
4915               Flags = setFlags(Flags, SCEV::FlagNUW);
4916           }
4917
4918           // We cannot transfer nuw and nsw flags from subtraction
4919           // operations -- sub nuw X, Y is not the same as add nuw X, -Y
4920           // for instance.
4921         }
4922
4923         const SCEV *StartVal = getSCEV(StartValueV);
4924         const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4925
4926         // Okay, for the entire analysis of this edge we assumed the PHI
4927         // to be symbolic. We now need to go back and purge all of the
4928         // entries for the scalars that use the symbolic expression.
4929         forgetSymbolicName(PN, SymbolicName);
4930         ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4931
4932         // We can add Flags to the post-inc expression only if we
4933         // know that it is *undefined behavior* for BEValueV to
4934         // overflow.
4935         if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
4936           if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
4937             (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
4938
4939         return PHISCEV;
4940       }
4941     }
4942   } else {
4943     // Otherwise, this could be a loop like this:
4944     //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
4945     // In this case, j = {1,+,1} and BEValue is j.
4946     // Because the other in-value of i (0) fits the evolution of BEValue,
4947     // i really is an addrec evolution.
4948     //
4949     // We can generalize this by saying that i is the shifted value of BEValue
4950     // by one iteration:
4951     //     PHI(f(0), f({1,+,1})) --> f({0,+,1})
4952     const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
4953     const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
4954     if (Shifted != getCouldNotCompute() &&
4955         Start != getCouldNotCompute()) {
4956       const SCEV *StartVal = getSCEV(StartValueV);
4957       if (Start == StartVal) {
4958         // Okay, for the entire analysis of this edge we assumed the PHI
4959         // to be symbolic. We now need to go back and purge all of the
4960         // entries for the scalars that use the symbolic expression.
4961         forgetSymbolicName(PN, SymbolicName);
4962         ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
4963         return Shifted;
4964       }
4965     }
4966   }
4967
4968   // Remove the temporary PHI node SCEV that has been inserted while intending
4969   // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
4970   // as it would prevent later (possibly simpler) SCEV expressions from being
4971   // added to the ValueExprMap.
4972   eraseValueFromMap(PN);
4973
4974   return nullptr;
4975 }
4976
4977 // Checks if the SCEV S is available at BB. S is considered available at BB
4978 // if S can be materialized at BB without introducing a fault.
4979 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
4980                                BasicBlock *BB) {
4981   struct CheckAvailable {
4982     bool TraversalDone = false;
4983     bool Available = true;
4984
4985     const Loop *L = nullptr; // The loop BB is in (can be nullptr)
4986     BasicBlock *BB = nullptr;
4987     DominatorTree &DT;
4988
4989     CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
4990         : L(L), BB(BB), DT(DT) {}
4991
4992     bool setUnavailable() {
4993       TraversalDone = true;
4994       Available = false;
4995       return false;
4996     }
4997
4998     bool follow(const SCEV *S) {
4999       switch (S->getSCEVType()) {
5000       case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
5001       case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
5002       case scUMinExpr:
5003       case scSMinExpr:
5004         // These expressions are available if their operand(s) is/are.
5005         return true;
5006
5007       case scAddRecExpr: {
5008         // We allow add recurrences that are in the loop BB is in, or some
5009         // outer loop. This guarantees availability because the value of the
5010         // add recurrence at BB is simply the "current" value of the induction
5011         // variable. We can relax this in the future; for instance an add
5012         // recurrence on a sibling dominating loop is also available at BB.
5013         const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5014         if (L && (ARLoop == L || ARLoop->contains(L)))
5015           return true;
5016
5017         return setUnavailable();
5018       }
5019
5020       case scUnknown: {
5021         // For SCEVUnknown, we check for simple dominance.
5022         const auto *SU = cast<SCEVUnknown>(S);
5023         Value *V = SU->getValue();
5024
5025         if (isa<Argument>(V))
5026           return false;
5027
5028         if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5029           return false;
5030
5031         return setUnavailable();
5032       }
5033
5034       case scUDivExpr:
5035       case scCouldNotCompute:
5036         // We do not try to be smart about these at all.
5037         return setUnavailable();
5038       }
5039       llvm_unreachable("switch should be fully covered!");
5040     }
5041
5042     bool isDone() { return TraversalDone; }
5043   };
5044
5045   CheckAvailable CA(L, BB, DT);
5046   SCEVTraversal<CheckAvailable> ST(CA);
5047
5048   ST.visitAll(S);
5049   return CA.Available;
5050 }
5051
5052 // Try to match a control flow sequence that branches out at BI and merges back
5053 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5054 // match.
5055 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5056 Value *&C, Value *&LHS, Value *&RHS) { 5057 C = BI->getCondition(); 5058 5059 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5060 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5061 5062 if (!LeftEdge.isSingleEdge()) 5063 return false; 5064 5065 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5066 5067 Use &LeftUse = Merge->getOperandUse(0); 5068 Use &RightUse = Merge->getOperandUse(1); 5069 5070 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5071 LHS = LeftUse; 5072 RHS = RightUse; 5073 return true; 5074 } 5075 5076 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5077 LHS = RightUse; 5078 RHS = LeftUse; 5079 return true; 5080 } 5081 5082 return false; 5083 } 5084 5085 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5086 auto IsReachable = 5087 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5088 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5089 const Loop *L = LI.getLoopFor(PN->getParent()); 5090 5091 // We don't want to break LCSSA, even in a SCEV expression tree. 5092 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5093 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5094 return nullptr; 5095 5096 // Try to match 5097 // 5098 // br %cond, label %left, label %right 5099 // left: 5100 // br label %merge 5101 // right: 5102 // br label %merge 5103 // merge: 5104 // V = phi [ %x, %left ], [ %y, %right ] 5105 // 5106 // as "select %cond, %x, %y" 5107 5108 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5109 assert(IDom && "At least the entry block should dominate PN"); 5110 5111 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5112 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5113 5114 if (BI && BI->isConditional() && 5115 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5116 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5117 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5118 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5119 } 5120 5121 return nullptr; 5122 } 5123 5124 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5125 if (const SCEV *S = createAddRecFromPHI(PN)) 5126 return S; 5127 5128 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5129 return S; 5130 5131 // If the PHI has a single incoming value, follow that value, unless the 5132 // PHI's incoming blocks are in a different loop, in which case doing so 5133 // risks breaking LCSSA form. Instcombine would normally zap these, but 5134 // it doesn't have DominatorTree information, so it may miss cases. 5135 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5136 if (LI.replacementPreservesLCSSAForm(PN, V)) 5137 return getSCEV(V); 5138 5139 // If it's not a loop phi, we can't handle it yet. 5140 return getUnknown(PN); 5141 } 5142 5143 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5144 Value *Cond, 5145 Value *TrueVal, 5146 Value *FalseVal) { 5147 // Handle "constant" branch or select. This can occur for instance when a 5148 // loop pass transforms an inner loop and moves on to process the outer loop. 5149 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5150 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5151 5152 // Try to match some simple smax or umax patterns. 
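  // For example (hypothetical IR): an icmp/select pair such as
  //   %c = icmp sgt i32 %a, %b
  //   %s = select i1 %c, i32 %a, i32 %b
  // is recognized below as smax(%a, %b); the LDiff == RDiff checks extend
  // this to forms like "a >s b ? a+x : b+x", which become smax(a, b)+x.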
5153 auto *ICI = dyn_cast<ICmpInst>(Cond); 5154 if (!ICI) 5155 return getUnknown(I); 5156 5157 Value *LHS = ICI->getOperand(0); 5158 Value *RHS = ICI->getOperand(1); 5159 5160 switch (ICI->getPredicate()) { 5161 case ICmpInst::ICMP_SLT: 5162 case ICmpInst::ICMP_SLE: 5163 std::swap(LHS, RHS); 5164 LLVM_FALLTHROUGH; 5165 case ICmpInst::ICMP_SGT: 5166 case ICmpInst::ICMP_SGE: 5167 // a >s b ? a+x : b+x -> smax(a, b)+x 5168 // a >s b ? b+x : a+x -> smin(a, b)+x 5169 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5170 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5171 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5172 const SCEV *LA = getSCEV(TrueVal); 5173 const SCEV *RA = getSCEV(FalseVal); 5174 const SCEV *LDiff = getMinusSCEV(LA, LS); 5175 const SCEV *RDiff = getMinusSCEV(RA, RS); 5176 if (LDiff == RDiff) 5177 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5178 LDiff = getMinusSCEV(LA, RS); 5179 RDiff = getMinusSCEV(RA, LS); 5180 if (LDiff == RDiff) 5181 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5182 } 5183 break; 5184 case ICmpInst::ICMP_ULT: 5185 case ICmpInst::ICMP_ULE: 5186 std::swap(LHS, RHS); 5187 LLVM_FALLTHROUGH; 5188 case ICmpInst::ICMP_UGT: 5189 case ICmpInst::ICMP_UGE: 5190 // a >u b ? a+x : b+x -> umax(a, b)+x 5191 // a >u b ? b+x : a+x -> umin(a, b)+x 5192 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5193 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5194 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5195 const SCEV *LA = getSCEV(TrueVal); 5196 const SCEV *RA = getSCEV(FalseVal); 5197 const SCEV *LDiff = getMinusSCEV(LA, LS); 5198 const SCEV *RDiff = getMinusSCEV(RA, RS); 5199 if (LDiff == RDiff) 5200 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5201 LDiff = getMinusSCEV(LA, RS); 5202 RDiff = getMinusSCEV(RA, LS); 5203 if (LDiff == RDiff) 5204 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5205 } 5206 break; 5207 case ICmpInst::ICMP_NE: 5208 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5209 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5210 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5211 const SCEV *One = getOne(I->getType()); 5212 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5213 const SCEV *LA = getSCEV(TrueVal); 5214 const SCEV *RA = getSCEV(FalseVal); 5215 const SCEV *LDiff = getMinusSCEV(LA, LS); 5216 const SCEV *RDiff = getMinusSCEV(RA, One); 5217 if (LDiff == RDiff) 5218 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5219 } 5220 break; 5221 case ICmpInst::ICMP_EQ: 5222 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5223 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5224 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5225 const SCEV *One = getOne(I->getType()); 5226 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5227 const SCEV *LA = getSCEV(TrueVal); 5228 const SCEV *RA = getSCEV(FalseVal); 5229 const SCEV *LDiff = getMinusSCEV(LA, One); 5230 const SCEV *RDiff = getMinusSCEV(RA, LS); 5231 if (LDiff == RDiff) 5232 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5233 } 5234 break; 5235 default: 5236 break; 5237 } 5238 5239 return getUnknown(I); 5240 } 5241 5242 /// Expand GEP instructions into add and multiply operations. This allows them 5243 /// to be analyzed by regular SCEV code. 5244 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5245 // Don't attempt to analyze GEPs over unsized objects. 
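  // (For instance, a GEP whose source element type is an opaque struct has no
  // known allocation size, so its index arithmetic cannot be expanded into
  // adds and multiplies.)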
5246   if (!GEP->getSourceElementType()->isSized())
5247     return getUnknown(GEP);
5248
5249   SmallVector<const SCEV *, 4> IndexExprs;
5250   for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
5251     IndexExprs.push_back(getSCEV(*Index));
5252   return getGEPExpr(GEP, IndexExprs);
5253 }
5254
5255 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5256   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5257     return C->getAPInt().countTrailingZeros();
5258
5259   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5260     return std::min(GetMinTrailingZeros(T->getOperand()),
5261                     (uint32_t)getTypeSizeInBits(T->getType()));
5262
5263   if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5264     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5265     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5266                ? getTypeSizeInBits(E->getType())
5267                : OpRes;
5268   }
5269
5270   if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5271     uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5272     return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5273                ? getTypeSizeInBits(E->getType())
5274                : OpRes;
5275   }
5276
5277   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5278     // The result is the min of all operands' results.
5279     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5280     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5281       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5282     return MinOpRes;
5283   }
5284
5285   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5286     // The result is the sum of all operands' results.
5287     uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5288     uint32_t BitWidth = getTypeSizeInBits(M->getType());
5289     for (unsigned i = 1, e = M->getNumOperands();
5290          SumOpRes != BitWidth && i != e; ++i)
5291       SumOpRes =
5292           std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5293     return SumOpRes;
5294   }
5295
5296   if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5297     // The result is the min of all operands' results.
5298     uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5299     for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5300       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5301     return MinOpRes;
5302   }
5303
5304   if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5305     // The result is the min of all operands' results.
5306     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5307     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5308       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5309     return MinOpRes;
5310   }
5311
5312   if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5313     // The result is the min of all operands' results.
5314     uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5315     for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5316       MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5317     return MinOpRes;
5318   }
5319
5320   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5321     // For a SCEVUnknown, ask ValueTracking.
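    // (computeKnownBits tracks which individual bits are known zero, so its
    // countMinTrailingZeros is exactly the trailing-zero guarantee we need.)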
5322 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5323 return Known.countMinTrailingZeros(); 5324 } 5325 5326 // SCEVUDivExpr 5327 return 0; 5328 } 5329 5330 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5331 auto I = MinTrailingZerosCache.find(S); 5332 if (I != MinTrailingZerosCache.end()) 5333 return I->second; 5334 5335 uint32_t Result = GetMinTrailingZerosImpl(S); 5336 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5337 assert(InsertPair.second && "Should insert a new key"); 5338 return InsertPair.first->second; 5339 } 5340 5341 /// Helper method to assign a range to V from metadata present in the IR. 5342 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5343 if (Instruction *I = dyn_cast<Instruction>(V)) 5344 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5345 return getConstantRangeFromMetadata(*MD); 5346 5347 return None; 5348 } 5349 5350 /// Determine the range for a particular SCEV. If SignHint is 5351 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5352 /// with a "cleaner" unsigned (resp. signed) representation. 5353 const ConstantRange & 5354 ScalarEvolution::getRangeRef(const SCEV *S, 5355 ScalarEvolution::RangeSignHint SignHint) { 5356 DenseMap<const SCEV *, ConstantRange> &Cache = 5357 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5358 : SignedRanges; 5359 ConstantRange::PreferredRangeType RangeType = 5360 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5361 ? ConstantRange::Unsigned : ConstantRange::Signed; 5362 5363 // See if we've computed this range already. 5364 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5365 if (I != Cache.end()) 5366 return I->second; 5367 5368 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5369 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5370 5371 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5372 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5373 using OBO = OverflowingBinaryOperator; 5374 5375 // If the value has known zeros, the maximum value will have those known zeros 5376 // as well. 
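  // Worked example (hypothetical): with BitWidth = 8 and TZ = 2, the largest
  // attainable unsigned value is 0b11111100 = 252, so the unsigned range
  // below becomes [0, 253) instead of the full set.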
5377 uint32_t TZ = GetMinTrailingZeros(S); 5378 if (TZ != 0) { 5379 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5380 ConservativeResult = 5381 ConstantRange(APInt::getMinValue(BitWidth), 5382 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5383 else 5384 ConservativeResult = ConstantRange( 5385 APInt::getSignedMinValue(BitWidth), 5386 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5387 } 5388 5389 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5390 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5391 unsigned WrapType = OBO::AnyWrap; 5392 if (Add->hasNoSignedWrap()) 5393 WrapType |= OBO::NoSignedWrap; 5394 if (Add->hasNoUnsignedWrap()) 5395 WrapType |= OBO::NoUnsignedWrap; 5396 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5397 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5398 WrapType, RangeType); 5399 return setRange(Add, SignHint, 5400 ConservativeResult.intersectWith(X, RangeType)); 5401 } 5402 5403 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5404 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5405 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5406 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5407 return setRange(Mul, SignHint, 5408 ConservativeResult.intersectWith(X, RangeType)); 5409 } 5410 5411 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5412 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5413 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5414 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5415 return setRange(SMax, SignHint, 5416 ConservativeResult.intersectWith(X, RangeType)); 5417 } 5418 5419 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5420 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5421 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5422 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5423 return setRange(UMax, SignHint, 5424 ConservativeResult.intersectWith(X, RangeType)); 5425 } 5426 5427 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5428 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5429 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5430 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5431 return setRange(SMin, SignHint, 5432 ConservativeResult.intersectWith(X, RangeType)); 5433 } 5434 5435 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5436 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5437 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5438 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5439 return setRange(UMin, SignHint, 5440 ConservativeResult.intersectWith(X, RangeType)); 5441 } 5442 5443 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5444 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5445 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5446 return setRange(UDiv, SignHint, 5447 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5448 } 5449 5450 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5451 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5452 return setRange(ZExt, SignHint, 5453 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5454 RangeType)); 5455 } 5456 5457 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5458 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5459 return setRange(SExt, 
SignHint,
5460                     ConservativeResult.intersectWith(X.signExtend(BitWidth),
5461                                                      RangeType));
5462   }
5463
5464   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
5465     ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
5466     return setRange(Trunc, SignHint,
5467                     ConservativeResult.intersectWith(X.truncate(BitWidth),
5468                                                      RangeType));
5469   }
5470
5471   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
5472     // If there's no unsigned wrap, the value will never be less than its
5473     // initial value.
5474     if (AddRec->hasNoUnsignedWrap()) {
5475       APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
5476       if (!UnsignedMinValue.isNullValue())
5477         ConservativeResult = ConservativeResult.intersectWith(
5478             ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
5479     }
5480
5481     // If there's no signed wrap, and all the operands except the initial value
5482     // have the same sign or are zero, the value won't ever be:
5483     // 1: smaller than the initial value if the operands are non-negative,
5484     // 2: bigger than the initial value if the operands are non-positive.
5485     // In both cases, the value cannot cross the signed min/max boundary.
5486     if (AddRec->hasNoSignedWrap()) {
5487       bool AllNonNeg = true;
5488       bool AllNonPos = true;
5489       for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
5490         if (!isKnownNonNegative(AddRec->getOperand(i)))
5491           AllNonNeg = false;
5492         if (!isKnownNonPositive(AddRec->getOperand(i)))
5493           AllNonPos = false;
5494       }
5495       if (AllNonNeg)
5496         ConservativeResult = ConservativeResult.intersectWith(
5497             ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
5498                                        APInt::getSignedMinValue(BitWidth)),
5499             RangeType);
5500       else if (AllNonPos)
5501         ConservativeResult = ConservativeResult.intersectWith(
5502             ConstantRange::getNonEmpty(
5503                 APInt::getSignedMinValue(BitWidth),
5504                 getSignedRangeMax(AddRec->getStart()) + 1),
5505             RangeType);
5506     }
5507
5508     // TODO: non-affine addrec
5509     if (AddRec->isAffine()) {
5510       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
5511       if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
5512           getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
5513         auto RangeFromAffine = getRangeForAffineAR(
5514             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
5515             BitWidth);
5516         ConservativeResult =
5517             ConservativeResult.intersectWith(RangeFromAffine, RangeType);
5518
5519         auto RangeFromFactoring = getRangeViaFactoring(
5520             AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
5521             BitWidth);
5522         ConservativeResult =
5523             ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
5524       }
5525     }
5526
5527     return setRange(AddRec, SignHint, std::move(ConservativeResult));
5528   }
5529
5530   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5531     // Check if the IR explicitly contains !range metadata.
5532     Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
5533     if (MDRange.hasValue())
5534       ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
5535                                                             RangeType);
5536
5537     // Split here to avoid paying the compile-time cost of calling both
5538     // computeKnownBits and ComputeNumSignBits. This restriction can be lifted
5539     // if needed.
5540     const DataLayout &DL = getDataLayout();
5541     if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
5542       // For a SCEVUnknown, ask ValueTracking.
5543       KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5544       if (Known.getBitWidth() != BitWidth)
5545         Known = Known.zextOrTrunc(BitWidth);
5546       // If Known does not result in full-set, intersect with it.
5547       if (Known.getMinValue() != Known.getMaxValue() + 1)
5548         ConservativeResult = ConservativeResult.intersectWith(
5549             ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
5550             RangeType);
5551     } else {
5552       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
5553              "generalize as needed!");
5554       unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5555       // If the pointer size is larger than the index size, this can cause
5556       // NS to be larger than BitWidth. So compensate for this.
5557       if (U->getType()->isPointerTy()) {
5558         unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
5559         int ptrIdxDiff = ptrSize - BitWidth;
5560         if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
5561           NS -= ptrIdxDiff;
5562       }
5563
5564       if (NS > 1)
5565         ConservativeResult = ConservativeResult.intersectWith(
5566             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
5567                           APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
5568             RangeType);
5569     }
5570
5571     // The range of a Phi is a subset of the union of the ranges of its inputs.
5572     if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
5573       // Make sure that we do not recurse endlessly over cyclic Phis.
5574       if (PendingPhiRanges.insert(Phi).second) {
5575         ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
5576         for (auto &Op : Phi->operands()) {
5577           auto OpRange = getRangeRef(getSCEV(Op), SignHint);
5578           RangeFromOps = RangeFromOps.unionWith(OpRange);
5579           // No point in continuing if we already have a full set.
5580           if (RangeFromOps.isFullSet())
5581             break;
5582         }
5583         ConservativeResult =
5584             ConservativeResult.intersectWith(RangeFromOps, RangeType);
5585         bool Erased = PendingPhiRanges.erase(Phi);
5586         assert(Erased && "Failed to erase Phi properly?");
5587         (void) Erased;
5588       }
5589     }
5590
5591     return setRange(U, SignHint, std::move(ConservativeResult));
5592   }
5593
5594   return setRange(S, SignHint, std::move(ConservativeResult));
5595 }
5596
5597 // Given a StartRange, Step and MaxBECount for an expression, compute a range
5598 // of values that the expression can take. Initially, the expression has a
5599 // value from StartRange and then is changed by Step up to MaxBECount times.
5600 // The Signed argument defines whether we treat Step as signed or unsigned.
5601 static ConstantRange getRangeForAffineARHelper(APInt Step,
5602                                                const ConstantRange &StartRange,
5603                                                const APInt &MaxBECount,
5604                                                unsigned BitWidth, bool Signed) {
5605   // If either Step or MaxBECount is 0, then the expression won't change, and
5606   // we just need to return the initial range.
5607   if (Step == 0 || MaxBECount == 0)
5608     return StartRange;
5609
5610   // If we don't know anything about the initial value (i.e. StartRange is
5611   // FullRange), then we don't know anything about the final range either.
5612   // Return FullRange.
5613   if (StartRange.isFullSet())
5614     return ConstantRange::getFull(BitWidth);
5615
5616   // If Step is signed and negative, then we use its absolute value, but we
5617   // also note that we're moving in the opposite direction.
5618   bool Descending = Signed && Step.isNegative();
5619
5620   if (Signed)
5621     // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5622     // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5623     // These equations hold true due to the well-defined wrap-around behavior
5624     // of APInt.
5625     Step = Step.abs();
5626
5627   // Check if Offset is more than the full span of BitWidth. If it is, the
5628   // expression is guaranteed to overflow.
5629   if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5630     return ConstantRange::getFull(BitWidth);
5631
5632   // Offset is by how much the expression can change. The checks above
5633   // guarantee no overflow here.
5634   APInt Offset = Step * MaxBECount;
5635
5636   // The minimum value of the final range will match the minimum value of
5637   // StartRange if the expression is increasing and will be decreased by Offset
5638   // otherwise. The maximum value of the final range will match the maximum
5639   // value of StartRange if the expression is decreasing and will be increased
5640   // by Offset otherwise.
5641   APInt StartLower = StartRange.getLower();
5642   APInt StartUpper = StartRange.getUpper() - 1;
5643   APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
5644                                    : (StartUpper + std::move(Offset));
5645
5646   // It's possible that the new minimum/maximum value will fall into the
5647   // initial range (due to wrap-around). This means that the expression can
5648   // take any value in this bitwidth, and we have to return the full range.
5649   if (StartRange.contains(MovedBoundary))
5650     return ConstantRange::getFull(BitWidth);
5651
5652   APInt NewLower =
5653       Descending ? std::move(MovedBoundary) : std::move(StartLower);
5654   APInt NewUpper =
5655       Descending ? std::move(StartUpper) : std::move(MovedBoundary);
5656   NewUpper += 1;
5657
5658   // No overflow detected, return the [NewLower, NewUpper) range.
5659   return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
5660 }
5661
5662 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
5663                                                    const SCEV *Step,
5664                                                    const SCEV *MaxBECount,
5665                                                    unsigned BitWidth) {
5666   assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
5667          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
5668          "Precondition!");
5669
5670   MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
5671   APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
5672
5673   // First, consider step signed.
5674   ConstantRange StartSRange = getSignedRange(Start);
5675   ConstantRange StepSRange = getSignedRange(Step);
5676
5677   // If Step can be both positive and negative, we need to find ranges for the
5678   // maximum absolute step values in both directions and union them.
5679   ConstantRange SR =
5680       getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
5681                                 MaxBECountValue, BitWidth, /* Signed = */ true);
5682   SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
5683                                               StartSRange, MaxBECountValue,
5684                                               BitWidth, /* Signed = */ true));
5685
5686   // Next, consider step unsigned.
5687   ConstantRange UR = getRangeForAffineARHelper(
5688       getUnsignedRangeMax(Step), getUnsignedRange(Start),
5689       MaxBECountValue, BitWidth, /* Signed = */ false);
5690
5691   // Finally, intersect signed and unsigned ranges.
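  // (Both SR and UR conservatively over-approximate the set of reachable
  // values, so their intersection is still a sound over-approximation.)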
5691 return SR.intersectWith(UR, ConstantRange::Smallest); 5692 } 5693 5694 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5695 const SCEV *Step, 5696 const SCEV *MaxBECount, 5697 unsigned BitWidth) { 5698 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5699 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5700 5701 struct SelectPattern { 5702 Value *Condition = nullptr; 5703 APInt TrueValue; 5704 APInt FalseValue; 5705 5706 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5707 const SCEV *S) { 5708 Optional<unsigned> CastOp; 5709 APInt Offset(BitWidth, 0); 5710 5711 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5712 "Should be!"); 5713 5714 // Peel off a constant offset: 5715 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5716 // In the future we could consider being smarter here and handle 5717 // {Start+Step,+,Step} too. 5718 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5719 return; 5720 5721 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5722 S = SA->getOperand(1); 5723 } 5724 5725 // Peel off a cast operation 5726 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 5727 CastOp = SCast->getSCEVType(); 5728 S = SCast->getOperand(); 5729 } 5730 5731 using namespace llvm::PatternMatch; 5732 5733 auto *SU = dyn_cast<SCEVUnknown>(S); 5734 const APInt *TrueVal, *FalseVal; 5735 if (!SU || 5736 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5737 m_APInt(FalseVal)))) { 5738 Condition = nullptr; 5739 return; 5740 } 5741 5742 TrueValue = *TrueVal; 5743 FalseValue = *FalseVal; 5744 5745 // Re-apply the cast we peeled off earlier 5746 if (CastOp.hasValue()) 5747 switch (*CastOp) { 5748 default: 5749 llvm_unreachable("Unknown SCEV cast type!"); 5750 5751 case scTruncate: 5752 TrueValue = TrueValue.trunc(BitWidth); 5753 FalseValue = FalseValue.trunc(BitWidth); 5754 break; 5755 case scZeroExtend: 5756 TrueValue = TrueValue.zext(BitWidth); 5757 FalseValue = FalseValue.zext(BitWidth); 5758 break; 5759 case scSignExtend: 5760 TrueValue = TrueValue.sext(BitWidth); 5761 FalseValue = FalseValue.sext(BitWidth); 5762 break; 5763 } 5764 5765 // Re-apply the constant offset we peeled off earlier 5766 TrueValue += Offset; 5767 FalseValue += Offset; 5768 } 5769 5770 bool isRecognized() { return Condition != nullptr; } 5771 }; 5772 5773 SelectPattern StartPattern(*this, BitWidth, Start); 5774 if (!StartPattern.isRecognized()) 5775 return ConstantRange::getFull(BitWidth); 5776 5777 SelectPattern StepPattern(*this, BitWidth, Step); 5778 if (!StepPattern.isRecognized()) 5779 return ConstantRange::getFull(BitWidth); 5780 5781 if (StartPattern.Condition != StepPattern.Condition) { 5782 // We don't handle this case today; but we could, by considering four 5783 // possibilities below instead of two. I'm not sure if there are cases where 5784 // that will help over what getRange already does, though. 5785 return ConstantRange::getFull(BitWidth); 5786 } 5787 5788 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5789 // construct arbitrary general SCEV expressions here. This function is called 5790 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5791 // say) can end up caching a suboptimal value. 5792 5793 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5794 // C2352 and C2512 (otherwise it isn't needed). 
5795 5796 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5797 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5798 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5799 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5800 5801 ConstantRange TrueRange = 5802 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5803 ConstantRange FalseRange = 5804 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5805 5806 return TrueRange.unionWith(FalseRange); 5807 } 5808 5809 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5810 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5811 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5812 5813 // Return early if there are no flags to propagate to the SCEV. 5814 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5815 if (BinOp->hasNoUnsignedWrap()) 5816 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5817 if (BinOp->hasNoSignedWrap()) 5818 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5819 if (Flags == SCEV::FlagAnyWrap) 5820 return SCEV::FlagAnyWrap; 5821 5822 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5823 } 5824 5825 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5826 // Here we check that I is in the header of the innermost loop containing I, 5827 // since we only deal with instructions in the loop header. The actual loop we 5828 // need to check later will come from an add recurrence, but getting that 5829 // requires computing the SCEV of the operands, which can be expensive. This 5830 // check we can do cheaply to rule out some cases early. 5831 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5832 if (InnermostContainingLoop == nullptr || 5833 InnermostContainingLoop->getHeader() != I->getParent()) 5834 return false; 5835 5836 // Only proceed if we can prove that I does not yield poison. 5837 if (!programUndefinedIfPoison(I)) 5838 return false; 5839 5840 // At this point we know that if I is executed, then it does not wrap 5841 // according to at least one of NSW or NUW. If I is not executed, then we do 5842 // not know if the calculation that I represents would wrap. Multiple 5843 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 5844 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 5845 // derived from other instructions that map to the same SCEV. We cannot make 5846 // that guarantee for cases where I is not executed. So we need to find the 5847 // loop that I is considered in relation to and prove that I is executed for 5848 // every iteration of that loop. That implies that the value that I 5849 // calculates does not wrap anywhere in the loop, so then we can apply the 5850 // flags to the SCEV. 5851 // 5852 // We check isLoopInvariant to disambiguate in case we are adding recurrences 5853 // from different loops, so that we know which loop to prove that I is 5854 // executed in. 5855 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 5856 // I could be an extractvalue from a call to an overflow intrinsic. 5857 // TODO: We can do better here in some cases. 
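    // (For an extractvalue, the operand is the {iN, i1} aggregate returned by
    // the overflow intrinsic, and aggregate types are not SCEVable.)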
5858     if (!isSCEVable(I->getOperand(OpIndex)->getType()))
5859       return false;
5860     const SCEV *Op = getSCEV(I->getOperand(OpIndex));
5861     if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
5862       bool AllOtherOpsLoopInvariant = true;
5863       for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
5864            ++OtherOpIndex) {
5865         if (OtherOpIndex != OpIndex) {
5866           const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
5867           if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
5868             AllOtherOpsLoopInvariant = false;
5869             break;
5870           }
5871         }
5872       }
5873       if (AllOtherOpsLoopInvariant &&
5874           isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
5875         return true;
5876     }
5877   }
5878   return false;
5879 }
5880
5881 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
5882   // If we know that \c I can never be poison, period, then that's enough.
5883   if (isSCEVExprNeverPoison(I))
5884     return true;
5885
5886   // For an add recurrence specifically, we assume that infinite loops without
5887   // side effects are undefined behavior, and then reason as follows:
5888   //
5889   // If the add recurrence is poison in any iteration, it is poison on all
5890   // future iterations (since incrementing poison yields poison). If the result
5891   // of the add recurrence is fed into the loop latch condition and the loop
5892   // does not contain any throws or exiting blocks other than the latch, we now
5893   // have the ability to "choose" whether the backedge is taken or not (by
5894   // choosing a sufficiently evil value for the poison feeding into the branch)
5895   // for every iteration including and after the one in which \p I first became
5896   // poison. There are two possibilities (let K be the iteration in which
5897   // \p I first became poison):
5898   //
5899   // 1. In the set of iterations including and after K, the loop body executes
5900   //    no side effects. In this case executing the backedge an infinite number
5901   //    of times will yield undefined behavior.
5902   //
5903   // 2. In the set of iterations including and after K, the loop body executes
5904   //    at least one side effect. In this case, that specific instance of side
5905   //    effect is control dependent on poison, which also yields undefined
5906   //    behavior.
5907
5908   auto *ExitingBB = L->getExitingBlock();
5909   auto *LatchBB = L->getLoopLatch();
5910   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
5911     return false;
5912
5913   SmallPtrSet<const Instruction *, 16> Pushed;
5914   SmallVector<const Instruction *, 8> PoisonStack;
5915
5916   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
5917   // things that are known to be poison under that assumption go on the
5918   // PoisonStack.
5919 Pushed.insert(I); 5920 PoisonStack.push_back(I); 5921 5922 bool LatchControlDependentOnPoison = false; 5923 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) { 5924 const Instruction *Poison = PoisonStack.pop_back_val(); 5925 5926 for (auto *PoisonUser : Poison->users()) { 5927 if (propagatesPoison(cast<Operator>(PoisonUser))) { 5928 if (Pushed.insert(cast<Instruction>(PoisonUser)).second) 5929 PoisonStack.push_back(cast<Instruction>(PoisonUser)); 5930 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) { 5931 assert(BI->isConditional() && "Only possibility!"); 5932 if (BI->getParent() == LatchBB) { 5933 LatchControlDependentOnPoison = true; 5934 break; 5935 } 5936 } 5937 } 5938 } 5939 5940 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L); 5941 } 5942 5943 ScalarEvolution::LoopProperties 5944 ScalarEvolution::getLoopProperties(const Loop *L) { 5945 using LoopProperties = ScalarEvolution::LoopProperties; 5946 5947 auto Itr = LoopPropertiesCache.find(L); 5948 if (Itr == LoopPropertiesCache.end()) { 5949 auto HasSideEffects = [](Instruction *I) { 5950 if (auto *SI = dyn_cast<StoreInst>(I)) 5951 return !SI->isSimple(); 5952 5953 return I->mayHaveSideEffects(); 5954 }; 5955 5956 LoopProperties LP = {/* HasNoAbnormalExits */ true, 5957 /*HasNoSideEffects*/ true}; 5958 5959 for (auto *BB : L->getBlocks()) 5960 for (auto &I : *BB) { 5961 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5962 LP.HasNoAbnormalExits = false; 5963 if (HasSideEffects(&I)) 5964 LP.HasNoSideEffects = false; 5965 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects) 5966 break; // We're already as pessimistic as we can get. 5967 } 5968 5969 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 5970 assert(InsertPair.second && "We just checked!"); 5971 Itr = InsertPair.first; 5972 } 5973 5974 return Itr->second; 5975 } 5976 5977 const SCEV *ScalarEvolution::createSCEV(Value *V) { 5978 if (!isSCEVable(V->getType())) 5979 return getUnknown(V); 5980 5981 if (Instruction *I = dyn_cast<Instruction>(V)) { 5982 // Don't attempt to analyze instructions in blocks that aren't 5983 // reachable. Such instructions don't matter, and they aren't required 5984 // to obey basic rules for definitions dominating uses which this 5985 // analysis depends on. 5986 if (!DT.isReachableFromEntry(I->getParent())) 5987 return getUnknown(UndefValue::get(V->getType())); 5988 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 5989 return getConstant(CI); 5990 else if (isa<ConstantPointerNull>(V)) 5991 return getZero(V->getType()); 5992 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 5993 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 5994 else if (!isa<ConstantExpr>(V)) 5995 return getUnknown(V); 5996 5997 Operator *U = cast<Operator>(V); 5998 if (auto BO = MatchBinaryOp(U, DT)) { 5999 switch (BO->Opcode) { 6000 case Instruction::Add: { 6001 // The simple thing to do would be to just call getSCEV on both operands 6002 // and call getAddExpr with the result. However if we're looking at a 6003 // bunch of things all added together, this can be quite inefficient, 6004 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6005 // Instead, gather up all the operands and make a single getAddExpr call. 6006 // LLVM IR canonical form means we need only traverse the left operands. 
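      // Illustrative sketch (hypothetical IR): "a + b + c + d" is typically
      // left-nested as
      //   %t0 = add i32 %a, %b
      //   %t1 = add i32 %t0, %c
      //   %t2 = add i32 %t1, %d
      // so repeatedly peeling BO->LHS gathers {%d, %c, %b, %a} and issues a
      // single getAddExpr call for all four operands.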
6007 SmallVector<const SCEV *, 4> AddOps; 6008 do { 6009 if (BO->Op) { 6010 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6011 AddOps.push_back(OpSCEV); 6012 break; 6013 } 6014 6015 // If a NUW or NSW flag can be applied to the SCEV for this 6016 // addition, then compute the SCEV for this addition by itself 6017 // with a separate call to getAddExpr. We need to do that 6018 // instead of pushing the operands of the addition onto AddOps, 6019 // since the flags are only known to apply to this particular 6020 // addition - they may not apply to other additions that can be 6021 // formed with operands from AddOps. 6022 const SCEV *RHS = getSCEV(BO->RHS); 6023 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6024 if (Flags != SCEV::FlagAnyWrap) { 6025 const SCEV *LHS = getSCEV(BO->LHS); 6026 if (BO->Opcode == Instruction::Sub) 6027 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6028 else 6029 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6030 break; 6031 } 6032 } 6033 6034 if (BO->Opcode == Instruction::Sub) 6035 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6036 else 6037 AddOps.push_back(getSCEV(BO->RHS)); 6038 6039 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6040 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6041 NewBO->Opcode != Instruction::Sub)) { 6042 AddOps.push_back(getSCEV(BO->LHS)); 6043 break; 6044 } 6045 BO = NewBO; 6046 } while (true); 6047 6048 return getAddExpr(AddOps); 6049 } 6050 6051 case Instruction::Mul: { 6052 SmallVector<const SCEV *, 4> MulOps; 6053 do { 6054 if (BO->Op) { 6055 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6056 MulOps.push_back(OpSCEV); 6057 break; 6058 } 6059 6060 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6061 if (Flags != SCEV::FlagAnyWrap) { 6062 MulOps.push_back( 6063 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6064 break; 6065 } 6066 } 6067 6068 MulOps.push_back(getSCEV(BO->RHS)); 6069 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6070 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6071 MulOps.push_back(getSCEV(BO->LHS)); 6072 break; 6073 } 6074 BO = NewBO; 6075 } while (true); 6076 6077 return getMulExpr(MulOps); 6078 } 6079 case Instruction::UDiv: 6080 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6081 case Instruction::URem: 6082 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6083 case Instruction::Sub: { 6084 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6085 if (BO->Op) 6086 Flags = getNoWrapFlagsFromUB(BO->Op); 6087 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6088 } 6089 case Instruction::And: 6090 // For an expression like x&255 that merely masks off the high bits, 6091 // use zext(trunc(x)) as the SCEV expression. 6092 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6093 if (CI->isZero()) 6094 return getSCEV(BO->RHS); 6095 if (CI->isMinusOne()) 6096 return getSCEV(BO->LHS); 6097 const APInt &A = CI->getValue(); 6098 6099 // Instcombine's ShrinkDemandedConstant may strip bits out of 6100 // constants, obscuring what would otherwise be a low-bits mask. 6101 // Use computeKnownBits to compute what ShrinkDemandedConstant 6102 // knew about to reconstruct a low-bits mask value. 
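        // Illustrative example (hypothetical values): for i32 "%x & 0xFF0",
        // LZ is 20, TZ is 4 and the effective mask is 0xFF0 itself, so the
        // expression folds to roughly
        //   (zext i8 (trunc i32 (%x /u 16) to i8) to i32) * 16,
        // i.e. the eight retained bits, scaled back into position.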
6103 unsigned LZ = A.countLeadingZeros(); 6104 unsigned TZ = A.countTrailingZeros(); 6105 unsigned BitWidth = A.getBitWidth(); 6106 KnownBits Known(BitWidth); 6107 computeKnownBits(BO->LHS, Known, getDataLayout(), 6108 0, &AC, nullptr, &DT); 6109 6110 APInt EffectiveMask = 6111 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6112 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6113 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6114 const SCEV *LHS = getSCEV(BO->LHS); 6115 const SCEV *ShiftedLHS = nullptr; 6116 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6117 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6118 // For an expression like (x * 8) & 8, simplify the multiply. 6119 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6120 unsigned GCD = std::min(MulZeros, TZ); 6121 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6122 SmallVector<const SCEV*, 4> MulOps; 6123 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6124 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6125 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6126 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6127 } 6128 } 6129 if (!ShiftedLHS) 6130 ShiftedLHS = getUDivExpr(LHS, MulCount); 6131 return getMulExpr( 6132 getZeroExtendExpr( 6133 getTruncateExpr(ShiftedLHS, 6134 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6135 BO->LHS->getType()), 6136 MulCount); 6137 } 6138 } 6139 break; 6140 6141 case Instruction::Or: 6142 // If the RHS of the Or is a constant, we may have something like: 6143 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6144 // optimizations will transparently handle this case. 6145 // 6146 // In order for this transformation to be safe, the LHS must be of the 6147 // form X*(2^n) and the Or constant must be less than 2^n. 6148 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6149 const SCEV *LHS = getSCEV(BO->LHS); 6150 const APInt &CIVal = CI->getValue(); 6151 if (GetMinTrailingZeros(LHS) >= 6152 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6153 // Build a plain add SCEV. 6154 return getAddExpr(LHS, getSCEV(CI), 6155 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6156 } 6157 } 6158 break; 6159 6160 case Instruction::Xor: 6161 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6162 // If the RHS of xor is -1, then this is a not operation. 6163 if (CI->isMinusOne()) 6164 return getNotSCEV(getSCEV(BO->LHS)); 6165 6166 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6167 // This is a variant of the check for xor with -1, and it handles 6168 // the case where instcombine has trimmed non-demanded bits out 6169 // of an xor with -1. 6170 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6171 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6172 if (LBO->getOpcode() == Instruction::And && 6173 LCI->getValue() == CI->getValue()) 6174 if (const SCEVZeroExtendExpr *Z = 6175 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6176 Type *UTy = BO->LHS->getType(); 6177 const SCEV *Z0 = Z->getOperand(); 6178 Type *Z0Ty = Z0->getType(); 6179 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6180 6181 // If C is a low-bits mask, the zero extend is serving to 6182 // mask off the high bits. Complement the operand and 6183 // re-apply the zext. 
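                // Illustrative example (hypothetical IR): if %t = and i32 %x,
                // 255 was modeled as (zext i8 (trunc i32 %x to i8) to i32),
                // then "xor i32 %t, 255" flips exactly those low eight bits,
                // so it becomes (zext i8 (not (trunc i32 %x to i8)) to i32).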
                if (CI->getValue().isMask(Z0TySize))
                  return getZeroExtendExpr(getNotSCEV(Z0), UTy);

                // If C is a single bit, it may be in the sign-bit position
                // before the zero-extend. In this case, represent the xor
                // using an add, which is equivalent, and re-apply the zext.
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy,
            // whose width is BitWidth - AShrAmt, since
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
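            // Illustrative example (hypothetical values): on i32,
            // "(shl %x, 8) ashr 4" has TruncTy = i28 and multiplier
            // 1 << (8 - 4), so it folds to roughly
            //   sext i28 ((trunc i32 %x to i28) * 16) to i32.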
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                           getConstant(Mul)), OuterTy);
          }
        }
      }
      if (BO->IsExact) {
        // Given exact arithmetic in-bounds right-shift by a constant,
        // we can lower it into:  (abs(x) EXACT/u (1<<C)) * signum(x)
        const SCEV *X = getSCEV(BO->LHS);
        const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
        APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
        const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
        return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  case Instruction::SDiv:
    // If both operands are non-negative, this is just a udiv.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  case Instruction::SRem:
    // If both operands are non-negative, this is just a urem.
    if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
        isKnownNonNegative(getSCEV(U->getOperand(1))))
      return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through.
    // Since createNodeForSelect only works for a condition that is an
    // `ICmpInst`, and constant expressions cannot have instructions as
    // operands, we'd have returned getUnknown for a select constant
    // expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
      return getSCEV(RV);

    if (auto *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::abs:
        return getAbsExpr(
            getSCEV(II->getArgOperand(0)),
            /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
      case Intrinsic::umax:
        return getUMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::umin:
        return getUMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smax:
        return getSMaxExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::smin:
        return getSMinExpr(getSCEV(II->getArgOperand(0)),
                           getSCEV(II->getArgOperand(1)));
      case Intrinsic::usub_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedY = getUMinExpr(X, Y);
        return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
      }
      case Intrinsic::uadd_sat: {
        const SCEV *X = getSCEV(II->getArgOperand(0));
        const SCEV *Y = getSCEV(II->getArgOperand(1));
        const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
        return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
      }
      default:
        break;
      }
    }
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply a
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();
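  // Illustrative example (hypothetical values): if the backedge-taken count
  // folds to the constant 7, TCExpr is 8 and the trip count is a multiple of
  // 8; if TCExpr is the non-constant (4 * %n + 4), the power-of-two factoring
  // above reports a multiple of 4 via GetMinTrailingZeros.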
  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getMax(this);
  };
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting
  // it into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a product build there is no use of the statistics; the casts below
  // keep the compiler from warning about unused variables.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //   loop0:
      //     %pn0 = phi
      //     ...
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v. If we don't have the early stop below, then in
      // cases like the above, getBackedgeTakenInfo(loop1) will clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) will clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
6661 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6662 } 6663 6664 void ScalarEvolution::forgetAllLoops() { 6665 // This method is intended to forget all info about loops. It should 6666 // invalidate caches as if the following happened: 6667 // - The trip counts of all loops have changed arbitrarily 6668 // - Every llvm::Value has been updated in place to produce a different 6669 // result. 6670 BackedgeTakenCounts.clear(); 6671 PredicatedBackedgeTakenCounts.clear(); 6672 LoopPropertiesCache.clear(); 6673 ConstantEvolutionLoopExitValue.clear(); 6674 ValueExprMap.clear(); 6675 ValuesAtScopes.clear(); 6676 LoopDispositions.clear(); 6677 BlockDispositions.clear(); 6678 UnsignedRanges.clear(); 6679 SignedRanges.clear(); 6680 ExprValueMap.clear(); 6681 HasRecMap.clear(); 6682 MinTrailingZerosCache.clear(); 6683 PredicatedSCEVRewrites.clear(); 6684 } 6685 6686 void ScalarEvolution::forgetLoop(const Loop *L) { 6687 // Drop any stored trip count value. 6688 auto RemoveLoopFromBackedgeMap = 6689 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6690 auto BTCPos = Map.find(L); 6691 if (BTCPos != Map.end()) { 6692 BTCPos->second.clear(); 6693 Map.erase(BTCPos); 6694 } 6695 }; 6696 6697 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6698 SmallVector<Instruction *, 32> Worklist; 6699 SmallPtrSet<Instruction *, 16> Visited; 6700 6701 // Iterate over all the loops and sub-loops to drop SCEV information. 6702 while (!LoopWorklist.empty()) { 6703 auto *CurrL = LoopWorklist.pop_back_val(); 6704 6705 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6706 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6707 6708 // Drop information about predicated SCEV rewrites for this loop. 6709 for (auto I = PredicatedSCEVRewrites.begin(); 6710 I != PredicatedSCEVRewrites.end();) { 6711 std::pair<const SCEV *, const Loop *> Entry = I->first; 6712 if (Entry.second == CurrL) 6713 PredicatedSCEVRewrites.erase(I++); 6714 else 6715 ++I; 6716 } 6717 6718 auto LoopUsersItr = LoopUsers.find(CurrL); 6719 if (LoopUsersItr != LoopUsers.end()) { 6720 for (auto *S : LoopUsersItr->second) 6721 forgetMemoizedResults(S); 6722 LoopUsers.erase(LoopUsersItr); 6723 } 6724 6725 // Drop information about expressions based on loop-header PHIs. 6726 PushLoopPHIs(CurrL, Worklist); 6727 6728 while (!Worklist.empty()) { 6729 Instruction *I = Worklist.pop_back_val(); 6730 if (!Visited.insert(I).second) 6731 continue; 6732 6733 ValueExprMapType::iterator It = 6734 ValueExprMap.find_as(static_cast<Value *>(I)); 6735 if (It != ValueExprMap.end()) { 6736 eraseValueFromMap(It->first); 6737 forgetMemoizedResults(It->second); 6738 if (PHINode *PN = dyn_cast<PHINode>(I)) 6739 ConstantEvolutionLoopExitValue.erase(PN); 6740 } 6741 6742 PushDefUseChildren(I, Worklist); 6743 } 6744 6745 LoopPropertiesCache.erase(CurrL); 6746 // Forget all contained loops too, to avoid dangling entries in the 6747 // ValuesAtScopes map. 6748 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6749 } 6750 } 6751 6752 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6753 while (Loop *Parent = L->getParentLoop()) 6754 L = Parent; 6755 forgetLoop(L); 6756 } 6757 6758 void ScalarEvolution::forgetValue(Value *V) { 6759 Instruction *I = dyn_cast<Instruction>(V); 6760 if (!I) return; 6761 6762 // Drop information about expressions based on loop-header PHIs. 
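  // Illustrative sketch (hypothetical IR): forgetting %a below also forgets
  //   %b = add i32 %a, 1
  //   %c = mul i32 %b, 2
  // because their cached SCEVs were built from %a's and are reached through
  // the def-use walk.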
6763 SmallVector<Instruction *, 16> Worklist; 6764 Worklist.push_back(I); 6765 6766 SmallPtrSet<Instruction *, 8> Visited; 6767 while (!Worklist.empty()) { 6768 I = Worklist.pop_back_val(); 6769 if (!Visited.insert(I).second) 6770 continue; 6771 6772 ValueExprMapType::iterator It = 6773 ValueExprMap.find_as(static_cast<Value *>(I)); 6774 if (It != ValueExprMap.end()) { 6775 eraseValueFromMap(It->first); 6776 forgetMemoizedResults(It->second); 6777 if (PHINode *PN = dyn_cast<PHINode>(I)) 6778 ConstantEvolutionLoopExitValue.erase(PN); 6779 } 6780 6781 PushDefUseChildren(I, Worklist); 6782 } 6783 } 6784 6785 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 6786 LoopDispositions.clear(); 6787 } 6788 6789 /// Get the exact loop backedge taken count considering all loop exits. A 6790 /// computable result can only be returned for loops with all exiting blocks 6791 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6792 /// is never skipped. This is a valid assumption as long as the loop exits via 6793 /// that test. For precise results, it is the caller's responsibility to specify 6794 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6795 const SCEV * 6796 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6797 SCEVUnionPredicate *Preds) const { 6798 // If any exits were not computable, the loop is not computable. 6799 if (!isComplete() || ExitNotTaken.empty()) 6800 return SE->getCouldNotCompute(); 6801 6802 const BasicBlock *Latch = L->getLoopLatch(); 6803 // All exiting blocks we have collected must dominate the only backedge. 6804 if (!Latch) 6805 return SE->getCouldNotCompute(); 6806 6807 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6808 // count is simply a minimum out of all these calculated exit counts. 6809 SmallVector<const SCEV *, 2> Ops; 6810 for (auto &ENT : ExitNotTaken) { 6811 const SCEV *BECount = ENT.ExactNotTaken; 6812 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6813 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6814 "We should only have known counts for exiting blocks that dominate " 6815 "latch!"); 6816 6817 Ops.push_back(BECount); 6818 6819 if (Preds && !ENT.hasAlwaysTruePredicate()) 6820 Preds->add(ENT.Predicate.get()); 6821 6822 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6823 "Predicate should be always true!"); 6824 } 6825 6826 return SE->getUMinFromMismatchedTypes(Ops); 6827 } 6828 6829 /// Get the exact not taken count for this loop exit. 6830 const SCEV * 6831 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 6832 ScalarEvolution *SE) const { 6833 for (auto &ENT : ExitNotTaken) 6834 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6835 return ENT.ExactNotTaken; 6836 6837 return SE->getCouldNotCompute(); 6838 } 6839 6840 const SCEV * 6841 ScalarEvolution::BackedgeTakenInfo::getMax(const BasicBlock *ExitingBlock, 6842 ScalarEvolution *SE) const { 6843 for (auto &ENT : ExitNotTaken) 6844 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6845 return ENT.MaxNotTaken; 6846 6847 return SE->getCouldNotCompute(); 6848 } 6849 6850 /// getMax - Get the max backedge taken count for the loop. 
6851 const SCEV * 6852 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6853 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6854 return !ENT.hasAlwaysTruePredicate(); 6855 }; 6856 6857 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6858 return SE->getCouldNotCompute(); 6859 6860 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6861 "No point in having a non-constant max backedge taken count!"); 6862 return getMax(); 6863 } 6864 6865 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6866 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6867 return !ENT.hasAlwaysTruePredicate(); 6868 }; 6869 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6870 } 6871 6872 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6873 ScalarEvolution *SE) const { 6874 if (getMax() && getMax() != SE->getCouldNotCompute() && 6875 SE->hasOperand(getMax(), S)) 6876 return true; 6877 6878 for (auto &ENT : ExitNotTaken) 6879 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6880 SE->hasOperand(ENT.ExactNotTaken, S)) 6881 return true; 6882 6883 return false; 6884 } 6885 6886 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6887 : ExactNotTaken(E), MaxNotTaken(E) { 6888 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6889 isa<SCEVConstant>(MaxNotTaken)) && 6890 "No point in having a non-constant max backedge taken count!"); 6891 } 6892 6893 ScalarEvolution::ExitLimit::ExitLimit( 6894 const SCEV *E, const SCEV *M, bool MaxOrZero, 6895 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6896 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6897 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6898 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6899 "Exact is not allowed to be less precise than Max"); 6900 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6901 isa<SCEVConstant>(MaxNotTaken)) && 6902 "No point in having a non-constant max backedge taken count!"); 6903 for (auto *PredSet : PredSetList) 6904 for (auto *P : *PredSet) 6905 addPredicate(P); 6906 } 6907 6908 ScalarEvolution::ExitLimit::ExitLimit( 6909 const SCEV *E, const SCEV *M, bool MaxOrZero, 6910 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6911 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6912 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6913 isa<SCEVConstant>(MaxNotTaken)) && 6914 "No point in having a non-constant max backedge taken count!"); 6915 } 6916 6917 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 6918 bool MaxOrZero) 6919 : ExitLimit(E, M, MaxOrZero, None) { 6920 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6921 isa<SCEVConstant>(MaxNotTaken)) && 6922 "No point in having a non-constant max backedge taken count!"); 6923 } 6924 6925 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 6926 /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant); ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For a switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
7072 return getCouldNotCompute(); 7073 Exit = SBB; 7074 } 7075 assert(Exit && "Exiting block must have at least one exit"); 7076 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7077 /*ControlsExit=*/IsOnlyExit); 7078 } 7079 7080 return getCouldNotCompute(); 7081 } 7082 7083 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7084 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7085 bool ControlsExit, bool AllowPredicates) { 7086 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7087 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7088 ControlsExit, AllowPredicates); 7089 } 7090 7091 Optional<ScalarEvolution::ExitLimit> 7092 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7093 bool ExitIfTrue, bool ControlsExit, 7094 bool AllowPredicates) { 7095 (void)this->L; 7096 (void)this->ExitIfTrue; 7097 (void)this->AllowPredicates; 7098 7099 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7100 this->AllowPredicates == AllowPredicates && 7101 "Variance in assumed invariant key components!"); 7102 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7103 if (Itr == TripCountMap.end()) 7104 return None; 7105 return Itr->second; 7106 } 7107 7108 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7109 bool ExitIfTrue, 7110 bool ControlsExit, 7111 bool AllowPredicates, 7112 const ExitLimit &EL) { 7113 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7114 this->AllowPredicates == AllowPredicates && 7115 "Variance in assumed invariant key components!"); 7116 7117 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7118 assert(InsertResult.second && "Expected successful insertion!"); 7119 (void)InsertResult; 7120 (void)ExitIfTrue; 7121 } 7122 7123 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7124 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7125 bool ControlsExit, bool AllowPredicates) { 7126 7127 if (auto MaybeEL = 7128 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7129 return *MaybeEL; 7130 7131 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7132 ControlsExit, AllowPredicates); 7133 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7134 return EL; 7135 } 7136 7137 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7138 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7139 bool ControlsExit, bool AllowPredicates) { 7140 // Check if the controlling expression for this loop is an And or Or. 7141 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7142 if (BO->getOpcode() == Instruction::And) { 7143 // Recurse on the operands of the and. 7144 bool EitherMayExit = !ExitIfTrue; 7145 ExitLimit EL0 = computeExitLimitFromCondCached( 7146 Cache, L, BO->getOperand(0), ExitIfTrue, 7147 ControlsExit && !EitherMayExit, AllowPredicates); 7148 ExitLimit EL1 = computeExitLimitFromCondCached( 7149 Cache, L, BO->getOperand(1), ExitIfTrue, 7150 ControlsExit && !EitherMayExit, AllowPredicates); 7151 // Be robust against unsimplified IR for the form "and i1 X, true" 7152 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7153 return CI->isOne() ? EL0 : EL1; 7154 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7155 return CI->isOne() ? 
EL1 : EL0; 7156 const SCEV *BECount = getCouldNotCompute(); 7157 const SCEV *MaxBECount = getCouldNotCompute(); 7158 if (EitherMayExit) { 7159 // Both conditions must be true for the loop to continue executing. 7160 // Choose the less conservative count. 7161 if (EL0.ExactNotTaken == getCouldNotCompute() || 7162 EL1.ExactNotTaken == getCouldNotCompute()) 7163 BECount = getCouldNotCompute(); 7164 else 7165 BECount = 7166 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7167 if (EL0.MaxNotTaken == getCouldNotCompute()) 7168 MaxBECount = EL1.MaxNotTaken; 7169 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7170 MaxBECount = EL0.MaxNotTaken; 7171 else 7172 MaxBECount = 7173 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7174 } else { 7175 // Both conditions must be true at the same time for the loop to exit. 7176 // For now, be conservative. 7177 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7178 MaxBECount = EL0.MaxNotTaken; 7179 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7180 BECount = EL0.ExactNotTaken; 7181 } 7182 7183 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7184 // to be more aggressive when computing BECount than when computing 7185 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7186 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7187 // to not. 7188 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7189 !isa<SCEVCouldNotCompute>(BECount)) 7190 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7191 7192 return ExitLimit(BECount, MaxBECount, false, 7193 {&EL0.Predicates, &EL1.Predicates}); 7194 } 7195 if (BO->getOpcode() == Instruction::Or) { 7196 // Recurse on the operands of the or. 7197 bool EitherMayExit = ExitIfTrue; 7198 ExitLimit EL0 = computeExitLimitFromCondCached( 7199 Cache, L, BO->getOperand(0), ExitIfTrue, 7200 ControlsExit && !EitherMayExit, AllowPredicates); 7201 ExitLimit EL1 = computeExitLimitFromCondCached( 7202 Cache, L, BO->getOperand(1), ExitIfTrue, 7203 ControlsExit && !EitherMayExit, AllowPredicates); 7204 // Be robust against unsimplified IR for the form "or i1 X, true" 7205 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7206 return CI->isZero() ? EL0 : EL1; 7207 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7208 return CI->isZero() ? EL1 : EL0; 7209 const SCEV *BECount = getCouldNotCompute(); 7210 const SCEV *MaxBECount = getCouldNotCompute(); 7211 if (EitherMayExit) { 7212 // Both conditions must be false for the loop to continue executing. 7213 // Choose the less conservative count. 7214 if (EL0.ExactNotTaken == getCouldNotCompute() || 7215 EL1.ExactNotTaken == getCouldNotCompute()) 7216 BECount = getCouldNotCompute(); 7217 else 7218 BECount = 7219 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7220 if (EL0.MaxNotTaken == getCouldNotCompute()) 7221 MaxBECount = EL1.MaxNotTaken; 7222 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7223 MaxBECount = EL0.MaxNotTaken; 7224 else 7225 MaxBECount = 7226 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7227 } else { 7228 // Both conditions must be false at the same time for the loop to exit. 7229 // For now, be conservative. 7230 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7231 MaxBECount = EL0.MaxNotTaken; 7232 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7233 BECount = EL0.ExactNotTaken; 7234 } 7235 // There are cases (e.g. 
PR26207) where computeExitLimitFromCond is able 7236 // to be more aggressive when computing BECount than when computing 7237 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7238 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7239 // to not. 7240 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7241 !isa<SCEVCouldNotCompute>(BECount)) 7242 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7243 7244 return ExitLimit(BECount, MaxBECount, false, 7245 {&EL0.Predicates, &EL1.Predicates}); 7246 } 7247 } 7248 7249 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7250 // Proceed to the next level to examine the icmp. 7251 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7252 ExitLimit EL = 7253 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7254 if (EL.hasFullInfo() || !AllowPredicates) 7255 return EL; 7256 7257 // Try again, but use SCEV predicates this time. 7258 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7259 /*AllowPredicates=*/true); 7260 } 7261 7262 // Check for a constant condition. These are normally stripped out by 7263 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7264 // preserve the CFG and is temporarily leaving constant conditions 7265 // in place. 7266 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7267 if (ExitIfTrue == !CI->getZExtValue()) 7268 // The backedge is always taken. 7269 return getCouldNotCompute(); 7270 else 7271 // The backedge is never taken. 7272 return getZero(CI->getType()); 7273 } 7274 7275 // If it's not an integer or pointer comparison then compute it the hard way. 7276 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7277 } 7278 7279 ScalarEvolution::ExitLimit 7280 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7281 ICmpInst *ExitCond, 7282 bool ExitIfTrue, 7283 bool ControlsExit, 7284 bool AllowPredicates) { 7285 // If the condition was exit on true, convert the condition to exit on false 7286 ICmpInst::Predicate Pred; 7287 if (!ExitIfTrue) 7288 Pred = ExitCond->getPredicate(); 7289 else 7290 Pred = ExitCond->getInversePredicate(); 7291 const ICmpInst::Predicate OriginalPred = Pred; 7292 7293 // Handle common loops like: for (X = "string"; *X; ++X) 7294 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7295 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7296 ExitLimit ItCnt = 7297 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7298 if (ItCnt.hasAnyInfo()) 7299 return ItCnt; 7300 } 7301 7302 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7303 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7304 7305 // Try to evaluate any dependencies out of the loop. 7306 LHS = getSCEVAtScope(LHS, L); 7307 RHS = getSCEVAtScope(RHS, L); 7308 7309 // At this point, we would like to compute how many iterations of the 7310 // loop the predicate will return true for these inputs. 7311 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7312 // If there is a loop-invariant, force it into the RHS. 7313 std::swap(LHS, RHS); 7314 Pred = ICmpInst::getSwappedPredicate(Pred); 7315 } 7316 7317 // Simplify the operands before analyzing them. 7318 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7319 7320 // If we have a comparison of a chrec against a constant, try to use value 7321 // ranges to answer this query. 
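  // Illustrative example (hypothetical values): for an exit test
  // "%iv u< 8" with %iv = {0,+,1}<%loop>, makeExactICmpRegion(ULT, 8)
  // yields the range [0, 8), and getNumIterationsInRange then counts how
  // many iterations of the recurrence stay inside that range.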
7322 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7323 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7324 if (AddRec->getLoop() == L) { 7325 // Form the constant range. 7326 ConstantRange CompRange = 7327 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7328 7329 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7330 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7331 } 7332 7333 switch (Pred) { 7334 case ICmpInst::ICMP_NE: { // while (X != Y) 7335 // Convert to: while (X-Y != 0) 7336 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7337 AllowPredicates); 7338 if (EL.hasAnyInfo()) return EL; 7339 break; 7340 } 7341 case ICmpInst::ICMP_EQ: { // while (X == Y) 7342 // Convert to: while (X-Y == 0) 7343 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7344 if (EL.hasAnyInfo()) return EL; 7345 break; 7346 } 7347 case ICmpInst::ICMP_SLT: 7348 case ICmpInst::ICMP_ULT: { // while (X < Y) 7349 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7350 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7351 AllowPredicates); 7352 if (EL.hasAnyInfo()) return EL; 7353 break; 7354 } 7355 case ICmpInst::ICMP_SGT: 7356 case ICmpInst::ICMP_UGT: { // while (X > Y) 7357 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7358 ExitLimit EL = 7359 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7360 AllowPredicates); 7361 if (EL.hasAnyInfo()) return EL; 7362 break; 7363 } 7364 default: 7365 break; 7366 } 7367 7368 auto *ExhaustiveCount = 7369 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7370 7371 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7372 return ExhaustiveCount; 7373 7374 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7375 ExitCond->getOperand(1), L, OriginalPred); 7376 } 7377 7378 ScalarEvolution::ExitLimit 7379 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7380 SwitchInst *Switch, 7381 BasicBlock *ExitingBlock, 7382 bool ControlsExit) { 7383 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7384 7385 // Give up if the exit is the default dest of a switch. 7386 if (Switch->getDefaultDest() == ExitingBlock) 7387 return getCouldNotCompute(); 7388 7389 assert(L->contains(Switch->getDefaultDest()) && 7390 "Default case must not exit the loop!"); 7391 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7392 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7393 7394 // while (X != Y) --> while (X-Y != 0) 7395 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7396 if (EL.hasAnyInfo()) 7397 return EL; 7398 7399 return getCouldNotCompute(); 7400 } 7401 7402 static ConstantInt * 7403 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7404 ScalarEvolution &SE) { 7405 const SCEV *InVal = SE.getConstant(C); 7406 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7407 assert(isa<SCEVConstant>(Val) && 7408 "Evaluation of SCEV at constant didn't fold correctly?"); 7409 return cast<SCEVConstant>(Val)->getValue(); 7410 } 7411 7412 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7413 /// compute the backedge execution count. 7414 ScalarEvolution::ExitLimit 7415 ScalarEvolution::computeLoadConstantCompareExitLimit( 7416 LoadInst *LI, 7417 Constant *RHS, 7418 const Loop *L, 7419 ICmpInst::Predicate predicate) { 7420 if (LI->isVolatile()) return getCouldNotCompute(); 7421 7422 // Check to see if the loaded pointer is a getelementptr of a global. 
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i - 2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization.  Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant value.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure.
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);  // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
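  // For instance, given '%iv.shifted = lshr i32 %iv, 3', a successful match
  // sets OutLHS = %iv and OutOpCode = Instruction::LShr; a shift amount of
  // zero does not match, since the amount must be strictly positive.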
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either of the form %iv or %iv.shifted in
  //
  //   loop:
  //     %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //     %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
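    // For example, over i8 the sequence for {-20,ashr,1} is
    //   -20, -10, -5, -3, -2, -1, -1, ...
    // and for {20,ashr,1} it is
    //   20, 10, 5, 2, 1, 0, 0, ...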
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must either be constants or a
/// value derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for
/// some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that
  // we weren't given a mapping for, or a value such as a call inside the
  // loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}

// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, that the loop executes a constant
/// number of times, and that the PHI node is just a recurrence involving
/// constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    // Not going to evaluate it.
    return ConstantEvolutionLoopExitValue[PN] = nullptr;

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue();  // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
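    // (As a small worked example: for 'i = phi [0, preheader], [i + 3, latch]'
    // and BEs == 4, the tracked value of i evolves 0, 3, 6, 9, 12, and 12 is
    // returned as the exit value.)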
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;  // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {  // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating; the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;  // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute.
    // We want to do this before calling EvaluateExpression on them, because
    // that may invalidate iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;  // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
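          // (If C2 is the first pointer operand encountered, move it into C;
          // the integer summands collected so far then become byte offsets
          // applied through an i8* GEP below.)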
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to
            // an i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers.  We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
    case scSMinExpr:
    case scUMinExpr:
      break;  // TODO: smax, umax, smin, umin.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
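          // With a backedge-taken count of zero, the header executes exactly
          // once, so the PHI can only ever hold its start value; return that
          // value if it is unique.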
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope.  If we
        // can prove that this replacement does not break LCSSA form, use the
        // new value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize this using
          // LI.replacementPreservesLCSSAForm; for now, as the simplest case,
          // we just support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate
      // the result.  This is particularly useful for computing loop exit
      // values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If the operand is non-constant and of a type SCEV cannot model
          // (neither integer nor pointer), don't even try to analyze it.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
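        // (If no operand changed, folding would merely reproduce getSCEV(I),
        // so in that case we fall through and return V unchanged.)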
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], DL,
                                                &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative
        // expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin() + i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;  // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded addrec.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin() + i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding.  Go
      // ahead and return the folded value.
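      // (For instance, {A,+,S} with S evaluating to 0 at this scope folds to
      // the loop-invariant A, which is no longer a SCEVAddRecExpr.)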
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the
      // AddRec loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B.  The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2.  The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general.  The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
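  // As a worked example with BW = 8 (N = 256), A = 6 and B = 4:
  //   D = gcd(6, 256) = 2 (Mult2 = 1), and B = 4 is divisible by D;
  //   A/D = 3, N/D = 128, and I = 43 because 3 * 43 = 129 = 1 (mod 128);
  //   the minimum root is then (43 * 4 mod 256) / 2 = 172 / 2 = 86,
  //   and indeed 6 * 86 = 516 = 4 (mod 256).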
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //
  //     I * (B / D) mod (N / D)
  //
  // To simplify the computation, we factor out the divide by D:
  //     (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //     L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ...,
  // that is,
  //     L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //     L + nM + n(n-1)/2 N = 0,  or  2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //     N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
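/// For example, MinOptional(-1, 3) is -1 (the comparison is signed, after
/// sign-extending both values to a common width), MinOptional(None, 3) is 3,
/// and MinOptional(None, None) is None.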
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients.  If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients.  The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations.  The values L, M, N are assumed to be signed, and they
/// should all have the same bit width.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution.  For cases
///     like x^2 = 5 no integer solution exists; in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find
///     it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X =
      APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth + 1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations.  The values M, N are assumed to be signed, and they
/// should both have the same bit width.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange.  Here we can assume
  // that we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there are two reasons for not
  // returning an actual number.  First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid.  If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary =
      [&](APInt Bound) -> std::pair<Optional<APInt>, bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M;  // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&](const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X - 1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it.  We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens.  Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not.  The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows.  This is only possible when the two
  // overflows cross k*2^W for the same k.  In such a case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it.  Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B.  If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B.  It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow.  If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A.  But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test.  The exit
  // condition is now expressed as a single expression, V = x-y.  So the exit
  // test is effectively V != 0.  We know and take advantage of the fact that
  // this expression is only ever used in a comparison-with-zero context.
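  // For example, for 'while (i != n)' with i == {0,+,1}, the caller passes in
  // V = {0,+,1} - n = {-n,+,1}, and we compute how many backedges are taken
  // before that expression first evaluates to zero.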

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant...
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index.  When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //     Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>.  If the AddRec is NUW,
  // then (in an unsigned sense) it cannot be counting up to wrap to 0, it
  // must be counting down to equal 0.  Consequently, N = Start / -Step.  We
  // have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
    APInt MaxBECountBase = getUnsignedRangeMax(Distance);
    if (MaxBECountBase.ult(MaxBECount))
      MaxBECount = MaxBECountBase;

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls the loop exit (the loop exits only if the
  // expression is true) and the addition is no-wrap, we can use unsigned
  // divide to compute the backedge count.  In this case, the step may not
  // divide the distance, but we don't care because if the condition is
  // "missed" the loop will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a
/// condition guarding a loop, it can be useful to be a little more general,
/// since a front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory, but they compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
8852 std::swap(LHS, RHS); 8853 Pred = ICmpInst::getSwappedPredicate(Pred); 8854 Changed = true; 8855 } 8856 8857 // If we're comparing an addrec with a value which is loop-invariant in the 8858 // addrec's loop, put the addrec on the left. Also make a dominance check, 8859 // as both operands could be addrecs loop-invariant in each other's loop. 8860 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8861 const Loop *L = AR->getLoop(); 8862 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8863 std::swap(LHS, RHS); 8864 Pred = ICmpInst::getSwappedPredicate(Pred); 8865 Changed = true; 8866 } 8867 } 8868 8869 // If there's a constant operand, canonicalize comparisons with boundary 8870 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 8871 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8872 const APInt &RA = RC->getAPInt(); 8873 8874 bool SimplifiedByConstantRange = false; 8875 8876 if (!ICmpInst::isEquality(Pred)) { 8877 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8878 if (ExactCR.isFullSet()) 8879 return TrivialCase(true); 8880 else if (ExactCR.isEmptySet()) 8881 return TrivialCase(false); 8882 8883 APInt NewRHS; 8884 CmpInst::Predicate NewPred; 8885 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8886 ICmpInst::isEquality(NewPred)) { 8887 // We were able to convert an inequality to an equality. 8888 Pred = NewPred; 8889 RHS = getConstant(NewRHS); 8890 Changed = SimplifiedByConstantRange = true; 8891 } 8892 } 8893 8894 if (!SimplifiedByConstantRange) { 8895 switch (Pred) { 8896 default: 8897 break; 8898 case ICmpInst::ICMP_EQ: 8899 case ICmpInst::ICMP_NE: 8900 // Fold ((-1) * %a) + %b == 0 (equivalent to %b - %a == 0) into %a == %b. 8901 if (!RA) 8902 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 8903 if (const SCEVMulExpr *ME = 8904 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 8905 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 8906 ME->getOperand(0)->isAllOnesValue()) { 8907 RHS = AE->getOperand(1); 8908 LHS = ME->getOperand(1); 8909 Changed = true; 8910 } 8911 break; 8912 8913 8914 // The "Should have been caught earlier!" messages refer to the fact 8915 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 8916 // should have fired on the corresponding cases, and canonicalized the 8917 // check to a trivial case. 8918 8919 case ICmpInst::ICMP_UGE: 8920 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8921 Pred = ICmpInst::ICMP_UGT; 8922 RHS = getConstant(RA - 1); 8923 Changed = true; 8924 break; 8925 case ICmpInst::ICMP_ULE: 8926 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8927 Pred = ICmpInst::ICMP_ULT; 8928 RHS = getConstant(RA + 1); 8929 Changed = true; 8930 break; 8931 case ICmpInst::ICMP_SGE: 8932 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8933 Pred = ICmpInst::ICMP_SGT; 8934 RHS = getConstant(RA - 1); 8935 Changed = true; 8936 break; 8937 case ICmpInst::ICMP_SLE: 8938 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8939 Pred = ICmpInst::ICMP_SLT; 8940 RHS = getConstant(RA + 1); 8941 Changed = true; 8942 break; 8943 } 8944 } 8945 } 8946 8947 // Check for obvious equality.
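// For illustration (hypothetical operands): "%x u<= %x" is folded to the trivially true case and "%x s< %x" to the trivially false case.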
8948 if (HasSameValue(LHS, RHS)) { 8949 if (ICmpInst::isTrueWhenEqual(Pred)) 8950 return TrivialCase(true); 8951 if (ICmpInst::isFalseWhenEqual(Pred)) 8952 return TrivialCase(false); 8953 } 8954 8955 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8956 // adding or subtracting 1 from one of the operands. 8957 switch (Pred) { 8958 case ICmpInst::ICMP_SLE: 8959 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8960 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8961 SCEV::FlagNSW); 8962 Pred = ICmpInst::ICMP_SLT; 8963 Changed = true; 8964 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 8965 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 8966 SCEV::FlagNSW); 8967 Pred = ICmpInst::ICMP_SLT; 8968 Changed = true; 8969 } 8970 break; 8971 case ICmpInst::ICMP_SGE: 8972 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 8973 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 8974 SCEV::FlagNSW); 8975 Pred = ICmpInst::ICMP_SGT; 8976 Changed = true; 8977 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 8978 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 8979 SCEV::FlagNSW); 8980 Pred = ICmpInst::ICMP_SGT; 8981 Changed = true; 8982 } 8983 break; 8984 case ICmpInst::ICMP_ULE: 8985 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 8986 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8987 SCEV::FlagNUW); 8988 Pred = ICmpInst::ICMP_ULT; 8989 Changed = true; 8990 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 8991 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 8992 Pred = ICmpInst::ICMP_ULT; 8993 Changed = true; 8994 } 8995 break; 8996 case ICmpInst::ICMP_UGE: 8997 if (!getUnsignedRangeMin(RHS).isMinValue()) { 8998 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 8999 Pred = ICmpInst::ICMP_UGT; 9000 Changed = true; 9001 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9002 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9003 SCEV::FlagNUW); 9004 Pred = ICmpInst::ICMP_UGT; 9005 Changed = true; 9006 } 9007 break; 9008 default: 9009 break; 9010 } 9011 9012 // TODO: More simplifications are possible here. 9013 9014 // Recursively simplify until we either hit a recursion limit or nothing 9015 // changes. 9016 if (Changed) 9017 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9018 9019 return Changed; 9020 } 9021 9022 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9023 return getSignedRangeMax(S).isNegative(); 9024 } 9025 9026 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9027 return getSignedRangeMin(S).isStrictlyPositive(); 9028 } 9029 9030 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9031 return !getSignedRangeMin(S).isNegative(); 9032 } 9033 9034 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9035 return !getSignedRangeMax(S).isStrictlyPositive(); 9036 } 9037 9038 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9039 return isKnownNegative(S) || isKnownPositive(S); 9040 } 9041 9042 std::pair<const SCEV *, const SCEV *> 9043 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9044 // Compute SCEV on entry of loop L. 9045 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9046 if (Start == getCouldNotCompute()) 9047 return { Start, Start }; 9048 // Compute post increment SCEV for loop L. 
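// Illustration: for S = {0,+,1}<L>, the resulting pair is (0, {1,+,1}<L>).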
9049 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9050 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9051 return { Start, PostInc }; 9052 } 9053 9054 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9055 const SCEV *LHS, const SCEV *RHS) { 9056 // First collect all loops. 9057 SmallPtrSet<const Loop *, 8> LoopsUsed; 9058 getUsedLoops(LHS, LoopsUsed); 9059 getUsedLoops(RHS, LoopsUsed); 9060 9061 if (LoopsUsed.empty()) 9062 return false; 9063 9064 // Domination relationship must be a linear order on collected loops. 9065 #ifndef NDEBUG 9066 for (auto *L1 : LoopsUsed) 9067 for (auto *L2 : LoopsUsed) 9068 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9069 DT.dominates(L2->getHeader(), L1->getHeader())) && 9070 "Domination relationship is not a linear order"); 9071 #endif 9072 9073 const Loop *MDL = 9074 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9075 [&](const Loop *L1, const Loop *L2) { 9076 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9077 }); 9078 9079 // Get the init and post increment value for LHS. 9080 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9081 // If LHS contains an unknown non-invariant SCEV, then bail out. 9082 if (SplitLHS.first == getCouldNotCompute()) 9083 return false; 9084 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9085 // Get the init and post increment value for RHS. 9086 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9087 // If RHS contains an unknown non-invariant SCEV, then bail out. 9088 if (SplitRHS.first == getCouldNotCompute()) 9089 return false; 9090 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9091 // It is possible that the init SCEV contains an invariant load which does 9092 // not dominate MDL and is not available at MDL loop entry, so we should 9093 // check for that here. 9094 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9095 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9096 return false; 9097 9098 // The backedge guard check seems to be faster than the entry check, so in 9099 // some cases it can speed up the whole estimation by short-circuiting. 9100 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9101 SplitRHS.second) && 9102 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); 9103 } 9104 9105 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9106 const SCEV *LHS, const SCEV *RHS) { 9107 // Canonicalize the inputs first. 9108 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9109 9110 if (isKnownViaInduction(Pred, LHS, RHS)) 9111 return true; 9112 9113 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9114 return true; 9115 9116 // Otherwise see what can be done with some simple reasoning. 9117 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9118 } 9119 9120 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, 9121 const SCEV *LHS, const SCEV *RHS, 9122 const Instruction *Context) { 9123 // TODO: Analyze guards and assumes from Context's block.
9124 return isKnownPredicate(Pred, LHS, RHS) || 9125 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS); 9126 } 9127 9128 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9129 const SCEVAddRecExpr *LHS, 9130 const SCEV *RHS) { 9131 const Loop *L = LHS->getLoop(); 9132 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9133 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9134 } 9135 9136 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9137 ICmpInst::Predicate Pred, 9138 bool &Increasing) { 9139 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9140 9141 #ifndef NDEBUG 9142 // Verify an invariant: inverting the predicate should turn a monotonically 9143 // increasing change to a monotonically decreasing one, and vice versa. 9144 bool IncreasingSwapped; 9145 bool ResultSwapped = isMonotonicPredicateImpl( 9146 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9147 9148 assert(Result == ResultSwapped && "should be able to analyze both!"); 9149 if (ResultSwapped) 9150 assert(Increasing == !IncreasingSwapped && 9151 "monotonicity should flip as we flip the predicate"); 9152 #endif 9153 9154 return Result; 9155 } 9156 9157 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9158 ICmpInst::Predicate Pred, 9159 bool &Increasing) { 9160 9161 // A zero step value for LHS means the induction variable is essentially a 9162 // loop invariant value. We don't really depend on the predicate actually 9163 // flipping from false to true (for increasing predicates, and the other way 9164 // around for decreasing predicates), all we care about is that *if* the 9165 // predicate changes then it only changes from false to true. 9166 // 9167 // A zero step value in itself is not very useful, but there may be places 9168 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9169 // as general as possible. 9170 9171 switch (Pred) { 9172 default: 9173 return false; // Conservative answer 9174 9175 case ICmpInst::ICMP_UGT: 9176 case ICmpInst::ICMP_UGE: 9177 case ICmpInst::ICMP_ULT: 9178 case ICmpInst::ICMP_ULE: 9179 if (!LHS->hasNoUnsignedWrap()) 9180 return false; 9181 9182 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9183 return true; 9184 9185 case ICmpInst::ICMP_SGT: 9186 case ICmpInst::ICMP_SGE: 9187 case ICmpInst::ICMP_SLT: 9188 case ICmpInst::ICMP_SLE: { 9189 if (!LHS->hasNoSignedWrap()) 9190 return false; 9191 9192 const SCEV *Step = LHS->getStepRecurrence(*this); 9193 9194 if (isKnownNonNegative(Step)) { 9195 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9196 return true; 9197 } 9198 9199 if (isKnownNonPositive(Step)) { 9200 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9201 return true; 9202 } 9203 9204 return false; 9205 } 9206 9207 } 9208 9209 llvm_unreachable("switch has default clause!"); 9210 } 9211 9212 bool ScalarEvolution::isLoopInvariantPredicate( 9213 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9214 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9215 const SCEV *&InvariantRHS) { 9216 9217 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
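// E.g. (hypothetical operands): a query "%n s> {0,+,1}<L>" with loop-invariant %n is rewritten below into "{0,+,1}<L> s< %n", so the addrec ends up on the left.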
9218 if (!isLoopInvariant(RHS, L)) { 9219 if (!isLoopInvariant(LHS, L)) 9220 return false; 9221 9222 std::swap(LHS, RHS); 9223 Pred = ICmpInst::getSwappedPredicate(Pred); 9224 } 9225 9226 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9227 if (!ArLHS || ArLHS->getLoop() != L) 9228 return false; 9229 9230 bool Increasing; 9231 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9232 return false; 9233 9234 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9235 // true as the loop iterates, and the backedge is control dependent on 9236 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9237 // 9238 // * if the predicate was false in the first iteration then the predicate 9239 // is never evaluated again, since the loop exits without taking the 9240 // backedge. 9241 // * if the predicate was true in the first iteration then it will 9242 // continue to be true for all future iterations since it is 9243 // monotonically increasing. 9244 // 9245 // For both the above possibilities, we can replace the loop varying 9246 // predicate with its value on the first iteration of the loop (which is 9247 // loop invariant). 9248 // 9249 // A similar reasoning applies for a monotonically decreasing predicate, by 9250 // replacing true with false and false with true in the above two bullets. 9251 9252 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9253 9254 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9255 return false; 9256 9257 InvariantPred = Pred; 9258 InvariantLHS = ArLHS->getStart(); 9259 InvariantRHS = RHS; 9260 return true; 9261 } 9262 9263 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9264 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9265 if (HasSameValue(LHS, RHS)) 9266 return ICmpInst::isTrueWhenEqual(Pred); 9267 9268 // This code is split out from isKnownPredicate because it is called from 9269 // within isLoopEntryGuardedByCond. 9270 9271 auto CheckRanges = 9272 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9273 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9274 .contains(RangeLHS); 9275 }; 9276 9277 // The check at the top of the function catches the case where the values are 9278 // known to be equal. 9279 if (Pred == CmpInst::ICMP_EQ) 9280 return false; 9281 9282 if (Pred == CmpInst::ICMP_NE) 9283 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9284 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9285 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9286 9287 if (CmpInst::isSigned(Pred)) 9288 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9289 9290 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9291 } 9292 9293 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9294 const SCEV *LHS, 9295 const SCEV *RHS) { 9296 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9297 // Return Y via OutY. 
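// E.g. (illustrative values): matching Result = (42 + %x)<nsw> against X = %x with ExpectedFlags = SCEV::FlagNSW succeeds and sets OutY to 42.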
9298 auto MatchBinaryAddToConst = 9299 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9300 SCEV::NoWrapFlags ExpectedFlags) { 9301 const SCEV *NonConstOp, *ConstOp; 9302 SCEV::NoWrapFlags FlagsPresent; 9303 9304 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9305 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9306 return false; 9307 9308 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9309 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9310 }; 9311 9312 APInt C; 9313 9314 switch (Pred) { 9315 default: 9316 break; 9317 9318 case ICmpInst::ICMP_SGE: 9319 std::swap(LHS, RHS); 9320 LLVM_FALLTHROUGH; 9321 case ICmpInst::ICMP_SLE: 9322 // X s<= (X + C)<nsw> if C >= 0 9323 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9324 return true; 9325 9326 // (X + C)<nsw> s<= X if C <= 0 9327 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9328 !C.isStrictlyPositive()) 9329 return true; 9330 break; 9331 9332 case ICmpInst::ICMP_SGT: 9333 std::swap(LHS, RHS); 9334 LLVM_FALLTHROUGH; 9335 case ICmpInst::ICMP_SLT: 9336 // X s< (X + C)<nsw> if C > 0 9337 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9338 C.isStrictlyPositive()) 9339 return true; 9340 9341 // (X + C)<nsw> s< X if C < 0 9342 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9343 return true; 9344 break; 9345 9346 case ICmpInst::ICMP_UGE: 9347 std::swap(LHS, RHS); 9348 LLVM_FALLTHROUGH; 9349 case ICmpInst::ICMP_ULE: 9350 // X u<= (X + C)<nuw> for any C 9351 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW)) 9352 return true; 9353 break; 9354 9355 case ICmpInst::ICMP_UGT: 9356 std::swap(LHS, RHS); 9357 LLVM_FALLTHROUGH; 9358 case ICmpInst::ICMP_ULT: 9359 // X u< (X + C)<nuw> if C != 0 9360 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue()) 9361 return true; 9362 break; 9363 } 9364 9365 return false; 9366 } 9367 9368 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9369 const SCEV *LHS, 9370 const SCEV *RHS) { 9371 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9372 return false; 9373 9374 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9375 // the stack can result in exponential time complexity. 9376 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9377 9378 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9379 // 9380 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9381 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9382 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9383 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9384 // use isKnownPredicate later if needed. 9385 return isKnownNonNegative(RHS) && 9386 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9387 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9388 } 9389 9390 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, 9391 ICmpInst::Predicate Pred, 9392 const SCEV *LHS, const SCEV *RHS) { 9393 // No need to even try if we know the module has no guards. 
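// For reference, a guard is a call of the form "call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]"; the match below looks for exactly this intrinsic and feeds %cond to isImpliedCond.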
9394 if (!HasGuards) 9395 return false; 9396 9397 return any_of(*BB, [&](const Instruction &I) { 9398 using namespace llvm::PatternMatch; 9399 9400 Value *Condition; 9401 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 9402 m_Value(Condition))) && 9403 isImpliedCond(Pred, LHS, RHS, Condition, false); 9404 }); 9405 } 9406 9407 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 9408 /// protected by a conditional between LHS and RHS. This is used 9409 /// to eliminate casts. 9410 bool 9411 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 9412 ICmpInst::Predicate Pred, 9413 const SCEV *LHS, const SCEV *RHS) { 9414 // Interpret a null as meaning no loop, where there is obviously no guard 9415 // (interprocedural conditions notwithstanding). 9416 if (!L) return true; 9417 9418 if (VerifyIR) 9419 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9420 "This cannot be done on broken IR!"); 9421 9422 9423 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9424 return true; 9425 9426 BasicBlock *Latch = L->getLoopLatch(); 9427 if (!Latch) 9428 return false; 9429 9430 BranchInst *LoopContinuePredicate = 9431 dyn_cast<BranchInst>(Latch->getTerminator()); 9432 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 9433 isImpliedCond(Pred, LHS, RHS, 9434 LoopContinuePredicate->getCondition(), 9435 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 9436 return true; 9437 9438 // We don't want more than one activation of the following loops on the stack 9439 // -- that can lead to O(n!) time complexity. 9440 if (WalkingBEDominatingConds) 9441 return false; 9442 9443 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 9444 9445 // See if we can exploit a trip count to prove the predicate. 9446 const auto &BETakenInfo = getBackedgeTakenInfo(L); 9447 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 9448 if (LatchBECount != getCouldNotCompute()) { 9449 // We know that Latch branches back to the loop header exactly 9450 // LatchBECount times. This means the backedge condition at Latch is 9451 // equivalent to "{0,+,1} u< LatchBECount". 9452 Type *Ty = LatchBECount->getType(); 9453 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 9454 const SCEV *LoopCounter = 9455 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 9456 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 9457 LatchBECount)) 9458 return true; 9459 } 9460 9461 // Check conditions due to any @llvm.assume intrinsics. 9462 for (auto &AssumeVH : AC.assumptions()) { 9463 if (!AssumeVH) 9464 continue; 9465 auto *CI = cast<CallInst>(AssumeVH); 9466 if (!DT.dominates(CI, Latch->getTerminator())) 9467 continue; 9468 9469 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 9470 return true; 9471 } 9472 9473 // If the loop is not reachable from the entry block, we risk running into an 9474 // infinite loop as we walk up into the dom tree. These loops do not matter 9475 // anyway, so we just return a conservative answer when we see them.
9476 if (!DT.isReachableFromEntry(L->getHeader())) 9477 return false; 9478 9479 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 9480 return true; 9481 9482 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 9483 DTN != HeaderDTN; DTN = DTN->getIDom()) { 9484 assert(DTN && "should reach the loop header before reaching the root!"); 9485 9486 BasicBlock *BB = DTN->getBlock(); 9487 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 9488 return true; 9489 9490 BasicBlock *PBB = BB->getSinglePredecessor(); 9491 if (!PBB) 9492 continue; 9493 9494 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 9495 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 9496 continue; 9497 9498 Value *Condition = ContinuePredicate->getCondition(); 9499 9500 // If we have an edge `E` within the loop body that dominates the only 9501 // latch, the condition guarding `E` also guards the backedge. This 9502 // reasoning works only for loops with a single latch. 9503 9504 BasicBlockEdge DominatingEdge(PBB, BB); 9505 if (DominatingEdge.isSingleEdge()) { 9506 // We're constructively (and conservatively) enumerating edges within the 9507 // loop body that dominate the latch. The dominator tree better agree 9508 // with us on this: 9509 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9510 9511 if (isImpliedCond(Pred, LHS, RHS, Condition, 9512 BB != ContinuePredicate->getSuccessor(0))) 9513 return true; 9514 } 9515 } 9516 9517 return false; 9518 } 9519 9520 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 9521 ICmpInst::Predicate Pred, 9522 const SCEV *LHS, 9523 const SCEV *RHS) { 9524 if (VerifyIR) 9525 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 9526 "This cannot be done on broken IR!"); 9527 9528 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9529 return true; 9530 9531 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9532 // the facts (a >= b && a != b) separately. A typical situation is when the 9533 // non-strict comparison is known from ranges and non-equality is known from 9534 // dominating predicates. If we are proving strict comparison, we always try 9535 // to prove non-equality and non-strict comparison separately. 9536 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9537 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9538 bool ProvedNonStrictComparison = false; 9539 bool ProvedNonEquality = false; 9540 9541 if (ProvingStrictComparison) { 9542 ProvedNonStrictComparison = 9543 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9544 ProvedNonEquality = 9545 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9546 if (ProvedNonStrictComparison && ProvedNonEquality) 9547 return true; 9548 } 9549 9550 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9551 auto ProveViaGuard = [&](const BasicBlock *Block) { 9552 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9553 return true; 9554 if (ProvingStrictComparison) { 9555 if (!ProvedNonStrictComparison) 9556 ProvedNonStrictComparison = 9557 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9558 if (!ProvedNonEquality) 9559 ProvedNonEquality = 9560 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9561 if (ProvedNonStrictComparison && ProvedNonEquality) 9562 return true; 9563 } 9564 return false; 9565 }; 9566 9567 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 
9568 auto ProveViaCond = [&](const Value *Condition, bool Inverse) { 9569 const Instruction *Context = &BB->front(); 9570 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context)) 9571 return true; 9572 if (ProvingStrictComparison) { 9573 if (!ProvedNonStrictComparison) 9574 ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS, 9575 Condition, Inverse, Context); 9576 if (!ProvedNonEquality) 9577 ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, 9578 Condition, Inverse, Context); 9579 if (ProvedNonStrictComparison && ProvedNonEquality) 9580 return true; 9581 } 9582 return false; 9583 }; 9584 9585 // Starting at the block's predecessor, climb up the predecessor chain, as long 9586 // as there are predecessors that can be found that have unique successors 9587 // leading to the original block. 9588 const Loop *ContainingLoop = LI.getLoopFor(BB); 9589 const BasicBlock *PredBB; 9590 if (ContainingLoop && ContainingLoop->getHeader() == BB) 9591 PredBB = ContainingLoop->getLoopPredecessor(); 9592 else 9593 PredBB = BB->getSinglePredecessor(); 9594 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB); 9595 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 9596 if (ProveViaGuard(Pair.first)) 9597 return true; 9598 9599 const BranchInst *LoopEntryPredicate = 9600 dyn_cast<BranchInst>(Pair.first->getTerminator()); 9601 if (!LoopEntryPredicate || 9602 LoopEntryPredicate->isUnconditional()) 9603 continue; 9604 9605 if (ProveViaCond(LoopEntryPredicate->getCondition(), 9606 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 9607 return true; 9608 } 9609 9610 // Check conditions due to any @llvm.assume intrinsics. 9611 for (auto &AssumeVH : AC.assumptions()) { 9612 if (!AssumeVH) 9613 continue; 9614 auto *CI = cast<CallInst>(AssumeVH); 9615 if (!DT.dominates(CI, BB)) 9616 continue; 9617 9618 if (ProveViaCond(CI->getArgOperand(0), false)) 9619 return true; 9620 } 9621 9622 return false; 9623 } 9624 9625 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9626 ICmpInst::Predicate Pred, 9627 const SCEV *LHS, 9628 const SCEV *RHS) { 9629 // Interpret a null as meaning no loop, where there is obviously no guard 9630 // (interprocedural conditions notwithstanding). 9631 if (!L) 9632 return false; 9633 9634 // Both LHS and RHS must be available at loop entry. 9635 assert(isAvailableAtLoopEntry(LHS, L) && 9636 "LHS is not available at Loop Entry"); 9637 assert(isAvailableAtLoopEntry(RHS, L) && 9638 "RHS is not available at Loop Entry"); 9639 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS); 9640 } 9641 9642 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 9643 const SCEV *RHS, 9644 const Value *FoundCondValue, bool Inverse, 9645 const Instruction *Context) { 9646 if (!PendingLoopPredicates.insert(FoundCondValue).second) 9647 return false; 9648 9649 auto ClearOnExit = 9650 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); 9651 9652 // Recursively handle And and Or conditions. 
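// Illustration of the And/Or handling below (hypothetical IR): if the branch condition is "%c1 & %c2" and the branch is known taken (Inverse is false), then both %c1 and %c2 hold, so proving the desired predicate from either conjunct suffices; dually, if "%c1 | %c2" is known false (Inverse is true), both disjuncts are false.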
9653 if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 9654 if (BO->getOpcode() == Instruction::And) { 9655 if (!Inverse) 9656 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse, 9657 Context) || 9658 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse, 9659 Context); 9660 } else if (BO->getOpcode() == Instruction::Or) { 9661 if (Inverse) 9662 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse, 9663 Context) || 9664 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse, 9665 Context); 9666 } 9667 } 9668 9669 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 9670 if (!ICI) return false; 9671 9672 // Now that we have found a conditional branch that dominates the loop or 9673 // controls the loop latch, check to see if it is the comparison we are looking for. 9674 ICmpInst::Predicate FoundPred; 9675 if (Inverse) 9676 FoundPred = ICI->getInversePredicate(); 9677 else 9678 FoundPred = ICI->getPredicate(); 9679 9680 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 9681 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 9682 9683 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context); 9684 } 9685 9686 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, 9687 const SCEV *RHS, 9688 ICmpInst::Predicate FoundPred, 9689 const SCEV *FoundLHS, const SCEV *FoundRHS, 9690 const Instruction *Context) { 9691 // Balance the types. 9692 if (getTypeSizeInBits(LHS->getType()) < 9693 getTypeSizeInBits(FoundLHS->getType())) { 9694 if (CmpInst::isSigned(Pred)) { 9695 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9696 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9697 } else { 9698 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9699 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9700 } 9701 } else if (getTypeSizeInBits(LHS->getType()) > 9702 getTypeSizeInBits(FoundLHS->getType())) { 9703 if (CmpInst::isSigned(FoundPred)) { 9704 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9705 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9706 } else { 9707 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9708 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9709 } 9710 } 9711 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 9712 FoundRHS, Context); 9713 } 9714 9715 bool ScalarEvolution::isImpliedCondBalancedTypes( 9716 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9717 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 9718 const Instruction *Context) { 9719 assert(getTypeSizeInBits(LHS->getType()) == 9720 getTypeSizeInBits(FoundLHS->getType()) && 9721 "Types should be balanced!"); 9722 // Canonicalize the query to match the way instcombine will have 9723 // canonicalized the comparison. 9724 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9725 if (LHS == RHS) 9726 return CmpInst::isTrueWhenEqual(Pred); 9727 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9728 if (FoundLHS == FoundRHS) 9729 return CmpInst::isFalseWhenEqual(FoundPred); 9730 9731 // Check to see if we can make the LHS or RHS match. 9732 if (LHS == FoundRHS || RHS == FoundLHS) { 9733 if (isa<SCEVConstant>(RHS)) { 9734 std::swap(FoundLHS, FoundRHS); 9735 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9736 } else { 9737 std::swap(LHS, RHS); 9738 Pred = ICmpInst::getSwappedPredicate(Pred); 9739 } 9740 } 9741 9742 // Check whether the found predicate is the same as the desired predicate.
9743 if (FoundPred == Pred) 9744 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 9745 9746 // Check whether swapping the found predicate makes it the same as the 9747 // desired predicate. 9748 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9749 if (isa<SCEVConstant>(RHS)) 9750 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context); 9751 else 9752 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS, 9753 LHS, FoundLHS, FoundRHS, Context); 9754 } 9755 9756 // Unsigned comparison is the same as signed comparison when both the operands 9757 // are non-negative. 9758 if (CmpInst::isUnsigned(FoundPred) && 9759 CmpInst::getSignedPredicate(FoundPred) == Pred && 9760 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9761 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context); 9762 9763 // Check if we can make progress by sharpening ranges. 9764 if (FoundPred == ICmpInst::ICMP_NE && 9765 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9766 9767 const SCEVConstant *C = nullptr; 9768 const SCEV *V = nullptr; 9769 9770 if (isa<SCEVConstant>(FoundLHS)) { 9771 C = cast<SCEVConstant>(FoundLHS); 9772 V = FoundRHS; 9773 } else { 9774 C = cast<SCEVConstant>(FoundRHS); 9775 V = FoundLHS; 9776 } 9777 9778 // The guarding predicate tells us that C != V. If the known range 9779 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9780 // range we consider has to correspond to same signedness as the 9781 // predicate we're interested in folding. 9782 9783 APInt Min = ICmpInst::isSigned(Pred) ? 9784 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9785 9786 if (Min == C->getAPInt()) { 9787 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9788 // This is true even if (Min + 1) wraps around -- in case of 9789 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 9790 9791 APInt SharperMin = Min + 1; 9792 9793 switch (Pred) { 9794 case ICmpInst::ICMP_SGE: 9795 case ICmpInst::ICMP_UGE: 9796 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9797 // RHS, we're done. 9798 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 9799 Context)) 9800 return true; 9801 LLVM_FALLTHROUGH; 9802 9803 case ICmpInst::ICMP_SGT: 9804 case ICmpInst::ICMP_UGT: 9805 // We know from the range information that (V `Pred` Min || 9806 // V == Min). We know from the guarding condition that !(V 9807 // == Min). This gives us 9808 // 9809 // V `Pred` Min || V == Min && !(V == Min) 9810 // => V `Pred` Min 9811 // 9812 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9813 9814 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), 9815 Context)) 9816 return true; 9817 break; 9818 9819 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 9820 case ICmpInst::ICMP_SLE: 9821 case ICmpInst::ICMP_ULE: 9822 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 9823 LHS, V, getConstant(SharperMin), Context)) 9824 return true; 9825 LLVM_FALLTHROUGH; 9826 9827 case ICmpInst::ICMP_SLT: 9828 case ICmpInst::ICMP_ULT: 9829 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 9830 LHS, V, getConstant(Min), Context)) 9831 return true; 9832 break; 9833 9834 default: 9835 // No change 9836 break; 9837 } 9838 } 9839 } 9840 9841 // Check whether the actual condition is beyond sufficient. 
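// For instance (hypothetical operands): a dominating "%a == %b" is more than enough to establish "%a u>= %b", and a strict "%a u< %b" is more than enough to establish "%a != %b".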
9842 if (FoundPred == ICmpInst::ICMP_EQ) 9843 if (ICmpInst::isTrueWhenEqual(Pred)) 9844 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context)) 9845 return true; 9846 if (Pred == ICmpInst::ICMP_NE) 9847 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9848 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, 9849 Context)) 9850 return true; 9851 9852 // Otherwise assume the worst. 9853 return false; 9854 } 9855 9856 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9857 const SCEV *&L, const SCEV *&R, 9858 SCEV::NoWrapFlags &Flags) { 9859 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9860 if (!AE || AE->getNumOperands() != 2) 9861 return false; 9862 9863 L = AE->getOperand(0); 9864 R = AE->getOperand(1); 9865 Flags = AE->getNoWrapFlags(); 9866 return true; 9867 } 9868 9869 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9870 const SCEV *Less) { 9871 // We avoid subtracting expressions here because this function is usually 9872 // fairly deep in the call stack (i.e. is called many times). 9873 9874 // X - X = 0. 9875 if (More == Less) 9876 return APInt(getTypeSizeInBits(More->getType()), 0); 9877 9878 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9879 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9880 const auto *MAR = cast<SCEVAddRecExpr>(More); 9881 9882 if (LAR->getLoop() != MAR->getLoop()) 9883 return None; 9884 9885 // We look at affine expressions only; not for correctness but to keep 9886 // getStepRecurrence cheap. 9887 if (!LAR->isAffine() || !MAR->isAffine()) 9888 return None; 9889 9890 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9891 return None; 9892 9893 Less = LAR->getStart(); 9894 More = MAR->getStart(); 9895 9896 // fall through 9897 } 9898 9899 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9900 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9901 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9902 return M - L; 9903 } 9904 9905 SCEV::NoWrapFlags Flags; 9906 const SCEV *LLess = nullptr, *RLess = nullptr; 9907 const SCEV *LMore = nullptr, *RMore = nullptr; 9908 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9909 // Compare (X + C1) vs X. 9910 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9911 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9912 if (RLess == More) 9913 return -(C1->getAPInt()); 9914 9915 // Compare X vs (X + C2). 9916 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9917 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9918 if (RMore == Less) 9919 return C2->getAPInt(); 9920 9921 // Compare (X + C1) vs (X + C2). 9922 if (C1 && C2 && RLess == RMore) 9923 return C2->getAPInt() - C1->getAPInt(); 9924 9925 return None; 9926 } 9927 9928 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 9929 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9930 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) { 9931 // Try to recognize the following pattern: 9932 // 9933 // FoundRHS = ... 9934 // ... 9935 // loop: 9936 // FoundLHS = {Start,+,W} 9937 // context_bb: // Basic block from the same loop 9938 // known(Pred, FoundLHS, FoundRHS) 9939 // 9940 // If some predicate is known in the context of a loop, it is also known on 9941 // each iteration of this loop, including the first iteration. Therefore, in 9942 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 9943 // prove the original pred using this fact. 
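// Illustration (names hypothetical): if FoundLHS = {%start,+,1}<%loop> and "FoundLHS u< %n" is known in context_bb, then it held on the first iteration in particular, so "%start u< %n" follows.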
9944 if (!Context) 9945 return false; 9946 const BasicBlock *ContextBB = Context->getParent(); 9947 // Make sure AR varies in the context block. 9948 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 9949 const Loop *L = AR->getLoop(); 9950 // Make sure that context belongs to the loop and executes on 1st iteration 9951 // (if it ever executes at all). 9952 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 9953 return false; 9954 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 9955 return false; 9956 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 9957 } 9958 9959 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 9960 const Loop *L = AR->getLoop(); 9961 // Make sure that context belongs to the loop and executes on 1st iteration 9962 // (if it ever executes at all). 9963 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 9964 return false; 9965 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 9966 return false; 9967 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 9968 } 9969 9970 return false; 9971 } 9972 9973 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9974 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9975 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9976 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9977 return false; 9978 9979 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9980 if (!AddRecLHS) 9981 return false; 9982 9983 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9984 if (!AddRecFoundLHS) 9985 return false; 9986 9987 // We'd like to let SCEV reason about control dependencies, so we constrain 9988 // both the inequalities to be about add recurrences on the same loop. This 9989 // way we can use isLoopEntryGuardedByCond later. 9990 9991 const Loop *L = AddRecFoundLHS->getLoop(); 9992 if (L != AddRecLHS->getLoop()) 9993 return false; 9994 9995 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9996 // 9997 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9998 // ... (2) 9999 // 10000 // Informal proof for (2), assuming (1) [*]: 10001 // 10002 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 10003 // 10004 // Then 10005 // 10006 // FoundLHS s< FoundRHS s< INT_MIN - C 10007 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 10008 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 10009 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 10010 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 10011 // <=> FoundLHS + C s< FoundRHS + C 10012 // 10013 // [*]: (1) can be proved by ruling out overflow. 10014 // 10015 // [**]: This can be proved by analyzing all the four possibilities: 10016 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 10017 // (A s>= 0, B s>= 0). 10018 // 10019 // Note: 10020 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 10021 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 10022 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 10023 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 10024 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 10025 // C)". 
10026 10027 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 10028 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 10029 if (!LDiff || !RDiff || *LDiff != *RDiff) 10030 return false; 10031 10032 if (LDiff->isMinValue()) 10033 return true; 10034 10035 APInt FoundRHSLimit; 10036 10037 if (Pred == CmpInst::ICMP_ULT) { 10038 FoundRHSLimit = -(*RDiff); 10039 } else { 10040 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 10041 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 10042 } 10043 10044 // Try to prove (1) or (2), as needed. 10045 return isAvailableAtLoopEntry(FoundRHS, L) && 10046 isLoopEntryGuardedByCond(L, Pred, FoundRHS, 10047 getConstant(FoundRHSLimit)); 10048 } 10049 10050 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred, 10051 const SCEV *LHS, const SCEV *RHS, 10052 const SCEV *FoundLHS, 10053 const SCEV *FoundRHS, unsigned Depth) { 10054 const PHINode *LPhi = nullptr, *RPhi = nullptr; 10055 10056 auto ClearOnExit = make_scope_exit([&]() { 10057 if (LPhi) { 10058 bool Erased = PendingMerges.erase(LPhi); 10059 assert(Erased && "Failed to erase LPhi!"); 10060 (void)Erased; 10061 } 10062 if (RPhi) { 10063 bool Erased = PendingMerges.erase(RPhi); 10064 assert(Erased && "Failed to erase RPhi!"); 10065 (void)Erased; 10066 } 10067 }); 10068 10069 // Find the respective Phis and check that they are not already pending. 10070 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) 10071 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) { 10072 if (!PendingMerges.insert(Phi).second) 10073 return false; 10074 LPhi = Phi; 10075 } 10076 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS)) 10077 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) { 10078 // If we detect a loop of Phi nodes being processed by this method, for 10079 // example: 10080 // 10081 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ] 10082 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ] 10083 // 10084 // we don't want to deal with a case that complex, so return the 10085 // conservative answer false. 10086 if (!PendingMerges.insert(Phi).second) 10087 return false; 10088 RPhi = Phi; 10089 } 10090 10091 // If none of LHS, RHS is a Phi, there is nothing to do here. 10092 if (!LPhi && !RPhi) 10093 return false; 10094 10095 // If there is a SCEVUnknown Phi we are interested in, make it the left one. 10096 if (!LPhi) { 10097 std::swap(LHS, RHS); 10098 std::swap(FoundLHS, FoundRHS); 10099 std::swap(LPhi, RPhi); 10100 Pred = ICmpInst::getSwappedPredicate(Pred); 10101 } 10102 10103 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!"); 10104 const BasicBlock *LBB = LPhi->getParent(); 10105 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10106 10107 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) { 10108 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) || 10109 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) || 10110 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth); 10111 }; 10112 10113 if (RPhi && RPhi->getParent() == LBB) { 10114 // Case one: RHS is also a SCEVUnknown Phi from the same basic block. 10115 // If we compare two Phis from the same block, and for each entry block 10116 // the predicate is true for incoming values from this block, then the 10117 // predicate is also true for the Phis.
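// E.g. (hypothetical IR): for %a = phi i32 [ 0, %bb1 ], [ %x, %bb2 ] and %b = phi i32 [ 1, %bb1 ], [ %y, %bb2 ], proving "0 s< 1" and "%x s< %y" proves "%a s< %b".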
10118 for (const BasicBlock *IncBB : predecessors(LBB)) { 10119 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10120 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB)); 10121 if (!ProvedEasily(L, R)) 10122 return false; 10123 } 10124 } else if (RAR && RAR->getLoop()->getHeader() == LBB) { 10125 // Case two: RHS is also a Phi from the same basic block, and it is an 10126 // AddRec. This means there is a loop which has both an AddRec and Unknown 10127 // PHIs; for it we can compare the incoming values of the AddRec from above 10128 // the loop and from the latch with the respective incoming values of LPhi. 10129 // TODO: Generalize to handle loops with many inputs in a header. 10130 if (LPhi->getNumIncomingValues() != 2) return false; 10131 10132 auto *RLoop = RAR->getLoop(); 10133 auto *Predecessor = RLoop->getLoopPredecessor(); 10134 assert(Predecessor && "Loop with AddRec with no predecessor?"); 10135 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor)); 10136 if (!ProvedEasily(L1, RAR->getStart())) 10137 return false; 10138 auto *Latch = RLoop->getLoopLatch(); 10139 assert(Latch && "Loop with AddRec with no latch?"); 10140 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch)); 10141 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this))) 10142 return false; 10143 } else { 10144 // In all other cases, go over the inputs of LHS and compare each of them 10145 // to RHS; the predicate is true for (LHS, RHS) if it is true for all such 10146 // pairs. At this point RHS is either a non-Phi, or it is a Phi from some 10147 // block different from LBB. 10148 for (const BasicBlock *IncBB : predecessors(LBB)) { 10149 // Check that RHS is available in this block. 10150 if (!dominates(RHS, IncBB)) 10151 return false; 10152 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB)); 10153 if (!ProvedEasily(L, RHS)) 10154 return false; 10155 } 10156 } 10157 return true; 10158 } 10159 10160 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 10161 const SCEV *LHS, const SCEV *RHS, 10162 const SCEV *FoundLHS, 10163 const SCEV *FoundRHS, 10164 const Instruction *Context) { 10165 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10166 return true; 10167 10168 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 10169 return true; 10170 10171 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 10172 Context)) 10173 return true; 10174 10175 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 10176 FoundLHS, FoundRHS) || 10177 // ~x < ~y --> x > y 10178 isImpliedCondOperandsHelper(Pred, LHS, RHS, 10179 getNotSCEV(FoundRHS), 10180 getNotSCEV(FoundLHS)); 10181 } 10182 10183 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 10184 template <typename MinMaxExprType> 10185 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 10186 const SCEV *Candidate) { 10187 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 10188 if (!MinMaxExpr) 10189 return false; 10190 10191 return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end(); 10192 } 10193 10194 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 10195 ICmpInst::Predicate Pred, 10196 const SCEV *LHS, const SCEV *RHS) { 10197 // If both sides are affine addrecs for the same loop, with equal 10198 // steps, and we know the recurrences don't wrap, then we only 10199 // need to check the predicate on the starting values.
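// Illustration: {0,+,4}<nuw><%L> u< {8,+,4}<nuw><%L> holds on every iteration exactly when it holds for the start values, i.e. 0 u< 8, because both sides advance in lockstep and neither wraps.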
10200 10201 if (!ICmpInst::isRelational(Pred)) 10202 return false; 10203 10204 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 10205 if (!LAR) 10206 return false; 10207 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 10208 if (!RAR) 10209 return false; 10210 if (LAR->getLoop() != RAR->getLoop()) 10211 return false; 10212 if (!LAR->isAffine() || !RAR->isAffine()) 10213 return false; 10214 10215 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 10216 return false; 10217 10218 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 10219 SCEV::FlagNSW : SCEV::FlagNUW; 10220 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 10221 return false; 10222 10223 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 10224 } 10225 10226 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 10227 /// expression? 10228 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 10229 ICmpInst::Predicate Pred, 10230 const SCEV *LHS, const SCEV *RHS) { 10231 switch (Pred) { 10232 default: 10233 return false; 10234 10235 case ICmpInst::ICMP_SGE: 10236 std::swap(LHS, RHS); 10237 LLVM_FALLTHROUGH; 10238 case ICmpInst::ICMP_SLE: 10239 return 10240 // min(A, ...) <= A 10241 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 10242 // A <= max(A, ...) 10243 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 10244 10245 case ICmpInst::ICMP_UGE: 10246 std::swap(LHS, RHS); 10247 LLVM_FALLTHROUGH; 10248 case ICmpInst::ICMP_ULE: 10249 return 10250 // min(A, ...) <= A 10251 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 10252 // A <= max(A, ...) 10253 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 10254 } 10255 10256 llvm_unreachable("covered switch fell through?!"); 10257 } 10258 10259 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 10260 const SCEV *LHS, const SCEV *RHS, 10261 const SCEV *FoundLHS, 10262 const SCEV *FoundRHS, 10263 unsigned Depth) { 10264 assert(getTypeSizeInBits(LHS->getType()) == 10265 getTypeSizeInBits(RHS->getType()) && 10266 "LHS and RHS have different sizes?"); 10267 assert(getTypeSizeInBits(FoundLHS->getType()) == 10268 getTypeSizeInBits(FoundRHS->getType()) && 10269 "FoundLHS and FoundRHS have different sizes?"); 10270 // We want to avoid hurting the compile time with analysis of too big trees. 10271 if (Depth > MaxSCEVOperationsImplicationDepth) 10272 return false; 10273 10274 // We only want to work with GT comparison so far. 10275 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { 10276 Pred = CmpInst::getSwappedPredicate(Pred); 10277 std::swap(LHS, RHS); 10278 std::swap(FoundLHS, FoundRHS); 10279 } 10280 10281 // For unsigned, try to reduce it to corresponding signed comparison. 10282 if (Pred == ICmpInst::ICMP_UGT) 10283 // We can replace unsigned predicate with its signed counterpart if all 10284 // involved values are non-negative. 10285 // TODO: We could have better support for unsigned. 10286 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { 10287 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing 10288 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us 10289 // use this fact to prove that LHS and RHS are non-negative. 
10290 const SCEV *MinusOne = getMinusOne(LHS->getType()); 10291 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS, 10292 FoundRHS) && 10293 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS, 10294 FoundRHS)) 10295 Pred = ICmpInst::ICMP_SGT; 10296 } 10297 10298 if (Pred != ICmpInst::ICMP_SGT) 10299 return false; 10300 10301 auto GetOpFromSExt = [&](const SCEV *S) { 10302 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S)) 10303 return Ext->getOperand(); 10304 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off 10305 // the constant in some cases. 10306 return S; 10307 }; 10308 10309 // Acquire values from extensions. 10310 auto *OrigLHS = LHS; 10311 auto *OrigFoundLHS = FoundLHS; 10312 LHS = GetOpFromSExt(LHS); 10313 FoundLHS = GetOpFromSExt(FoundLHS); 10314 10315 // Checks whether the SGT predicate can be proved trivially or using the found context. 10316 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) { 10317 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) || 10318 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS, 10319 FoundRHS, Depth + 1); 10320 }; 10321 10322 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) { 10323 // We want to avoid creation of any new non-constant SCEV. Since we are 10324 // going to compare the operands to RHS, we should be certain that we don't 10325 // need any size extensions for this. So let's decline all cases when the 10326 // sizes of types of LHS and RHS do not match. 10327 // TODO: Maybe try to get RHS from sext to catch more cases? 10328 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType())) 10329 return false; 10330 10331 // Should not overflow. 10332 if (!LHSAddExpr->hasNoSignedWrap()) 10333 return false; 10334 10335 auto *LL = LHSAddExpr->getOperand(0); 10336 auto *LR = LHSAddExpr->getOperand(1); 10337 auto *MinusOne = getMinusOne(RHS->getType()); 10338 10339 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context. 10340 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 10341 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 10342 }; 10343 // Try to prove the following rule: 10344 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 10345 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 10346 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 10347 return true; 10348 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 10349 Value *LL, *LR; 10350 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 10351 10352 using namespace llvm::PatternMatch; 10353 10354 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 10355 // Rules for division. 10356 // We are going to perform some comparisons with Denominator and its 10357 // derivative expressions. In the general case, creating a SCEV for it may 10358 // lead to a complex analysis of the entire graph, and in particular it 10359 // can request trip count recalculation for the same loop. This would be 10360 // cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid 10361 // this, we only want to create SCEVs that are constants in this section. 10362 // So we bail if Denominator is not a constant. 10363 if (!isa<ConstantInt>(LR)) 10364 return false; 10365 10366 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 10367 10368 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
10369 // then a SCEV for the numerator already exists and matches with FoundLHS. 10370 auto *Numerator = getExistingSCEV(LL); 10371 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 10372 return false; 10373 10374 // Make sure that the numerator matches with FoundLHS and the denominator 10375 // is positive. 10376 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 10377 return false; 10378 10379 auto *DTy = Denominator->getType(); 10380 auto *FRHSTy = FoundRHS->getType(); 10381 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 10382 // One of the types is a pointer and the other one is not. We cannot extend 10383 // them properly to a wider type, so let us just reject this case. 10384 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 10385 // to avoid this check. 10386 return false; 10387 10388 // Given that: 10389 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 10390 auto *WTy = getWiderType(DTy, FRHSTy); 10391 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 10392 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 10393 10394 // Try to prove the following rule: 10395 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 10396 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we 10397 // divide it by Denominator < 4, we will have at least 1. 10398 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 10399 if (isKnownNonPositive(RHS) && 10400 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 10401 return true; 10402 10403 // Try to prove the following rule: 10404 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 10405 // For example, given that FoundLHS > -3, FoundLHS is at least -2. 10406 // If we divide it by Denominator > 2, then: 10407 // 1. If FoundLHS is negative, then the result is 0. 10408 // 2. If FoundLHS is non-negative, then the result is non-negative. 10409 // Either way, the result is non-negative. 10410 auto *MinusOne = getMinusOne(WTy); 10411 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10412 if (isKnownNegative(RHS) && 10413 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10414 return true; 10415 } 10416 } 10417 10418 // If our expression contained SCEVUnknown Phis, and we split it down and now 10419 // need to prove something for them, try to prove the predicate for every 10420 // possible incoming value of those Phis. 10421 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10422 return true; 10423 10424 return false; 10425 } 10426 10427 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 10428 const SCEV *LHS, const SCEV *RHS) { 10429 // zext x u<= sext x, sext x s<= zext x 10430 switch (Pred) { 10431 case ICmpInst::ICMP_SGE: 10432 std::swap(LHS, RHS); 10433 LLVM_FALLTHROUGH; 10434 case ICmpInst::ICMP_SLE: { 10435 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 10436 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 10437 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 10438 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10439 return true; 10440 break; 10441 } 10442 case ICmpInst::ICMP_UGE: 10443 std::swap(LHS, RHS); 10444 LLVM_FALLTHROUGH; 10445 case ICmpInst::ICMP_ULE: { 10446 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
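// Concrete i8-to-i16 illustration: for x = -1, zext x is 255 while sext x is 65535 when read unsigned, so zext x u<= sext x; for x = 7 both extensions give 7 and the comparisons hold with equality.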
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
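  // For example (illustrative values): if the antecedent is "`FoundLHS` s< 10"
  // on a 32-bit type, FoundLHSRange is [INT32_MIN, 10).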
10532 ConstantRange FoundLHSRange = 10533 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 10534 10535 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 10536 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 10537 10538 // We can also compute the range of values for `LHS` that satisfy the 10539 // consequent, "`LHS` `Pred` `RHS`": 10540 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 10541 ConstantRange SatisfyingLHSRange = 10542 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 10543 10544 // The antecedent implies the consequent if every value of `LHS` that 10545 // satisfies the antecedent also satisfies the consequent. 10546 return SatisfyingLHSRange.contains(LHSRange); 10547 } 10548 10549 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 10550 bool IsSigned, bool NoWrap) { 10551 assert(isKnownPositive(Stride) && "Positive stride expected!"); 10552 10553 if (NoWrap) return false; 10554 10555 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10556 const SCEV *One = getOne(Stride->getType()); 10557 10558 if (IsSigned) { 10559 APInt MaxRHS = getSignedRangeMax(RHS); 10560 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 10561 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10562 10563 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 10564 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 10565 } 10566 10567 APInt MaxRHS = getUnsignedRangeMax(RHS); 10568 APInt MaxValue = APInt::getMaxValue(BitWidth); 10569 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10570 10571 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 10572 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10573 } 10574 10575 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10576 bool IsSigned, bool NoWrap) { 10577 if (NoWrap) return false; 10578 10579 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10580 const SCEV *One = getOne(Stride->getType()); 10581 10582 if (IsSigned) { 10583 APInt MinRHS = getSignedRangeMin(RHS); 10584 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10585 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10586 10587 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10588 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10589 } 10590 10591 APInt MinRHS = getUnsignedRangeMin(RHS); 10592 APInt MinValue = APInt::getMinValue(BitWidth); 10593 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10594 10595 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10596 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10597 } 10598 10599 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10600 bool Equality) { 10601 const SCEV *One = getOne(Step->getType()); 10602 Delta = Equality ? getAddExpr(Delta, Step) 10603 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10604 return getUDivExpr(Delta, Step); 10605 } 10606 10607 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10608 const SCEV *Stride, 10609 const SCEV *End, 10610 unsigned BitWidth, 10611 bool IsSigned) { 10612 10613 assert(!isKnownNonPositive(Stride) && 10614 "Stride is expected strictly positive!"); 10615 // Calculate the maximum backedge count based on the range of values 10616 // permitted by Start, End, and Stride. 10617 const SCEV *MaxBECount; 10618 APInt MinStart = 10619 IsSigned ? 
      getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single-exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
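    //
    // As a concrete sanity check of the formula above (hypothetical values):
    // with start = 0, end = 8 and a run-time stride of 3, the loop visits
    // i = 0, 3, 6 and takes the backedge twice, and indeed
    // (max(8, 0 + 3) - 0 - 1) /u 3 = 7 /u 3 = 2.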
10703 // 10704 // Precondition b) implies that the unknown stride cannot be zero otherwise 10705 // we have UB. 10706 // 10707 // The positive stride case is the same as isKnownPositive(Stride) returning 10708 // true (original behavior of the function). 10709 // 10710 // We want to make sure that the stride is truly unknown as there are edge 10711 // cases where ScalarEvolution propagates no wrap flags to the 10712 // post-increment/decrement IV even though the increment/decrement operation 10713 // itself is wrapping. The computed backedge taken count may be wrong in 10714 // such cases. This is prevented by checking that the stride is not known to 10715 // be either positive or non-positive. For example, no wrap flags are 10716 // propagated to the post-increment IV of this loop with a trip count of 2 - 10717 // 10718 // unsigned char i; 10719 // for(i=127; i<128; i+=129) 10720 // A[i] = i; 10721 // 10722 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10723 !loopHasNoSideEffects(L)) 10724 return getCouldNotCompute(); 10725 } else if (!Stride->isOne() && 10726 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10727 // Avoid proven overflow cases: this will ensure that the backedge taken 10728 // count will not generate any unsigned overflow. Relaxed no-overflow 10729 // conditions exploit NoWrapFlags, allowing to optimize in presence of 10730 // undefined behaviors like the case of C language. 10731 return getCouldNotCompute(); 10732 10733 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10734 : ICmpInst::ICMP_ULT; 10735 const SCEV *Start = IV->getStart(); 10736 const SCEV *End = RHS; 10737 // When the RHS is not invariant, we do not know the end bound of the loop and 10738 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10739 // calculate the MaxBECount, given the start, stride and max value for the end 10740 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10741 // checked above). 10742 if (!isLoopInvariant(RHS, L)) { 10743 const SCEV *MaxBECount = computeMaxBECountForLT( 10744 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10745 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10746 false /*MaxOrZero*/, Predicates); 10747 } 10748 // If the backedge is taken at least once, then it will be taken 10749 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10750 // is the LHS value of the less-than comparison the first time it is evaluated 10751 // and End is the RHS. 10752 const SCEV *BECountIfBackedgeTaken = 10753 computeBECount(getMinusSCEV(End, Start), Stride, false); 10754 // If the loop entry is guarded by the result of the backedge test of the 10755 // first loop iteration, then we know the backedge will be taken at least 10756 // once and so the backedge taken count is as above. If not then we use the 10757 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10758 // as if the backedge is taken at least once max(End,Start) is End and so the 10759 // result is as above, and if not max(End,Start) is Start so we get a backedge 10760 // count of zero. 10761 const SCEV *BECount; 10762 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10763 BECount = BECountIfBackedgeTaken; 10764 else { 10765 // If we know that RHS >= Start in the context of loop, then we know that 10766 // max(RHS, Start) = RHS at this point. 10767 if (isLoopEntryGuardedByCond( 10768 L, IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start)) 10769 End = RHS; 10770 else 10771 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10772 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10773 } 10774 10775 const SCEV *MaxBECount; 10776 bool MaxOrZero = false; 10777 if (isa<SCEVConstant>(BECount)) 10778 MaxBECount = BECount; 10779 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10780 // If we know exactly how many times the backedge will be taken if it's 10781 // taken at least once, then the backedge count will either be that or 10782 // zero. 10783 MaxBECount = BECountIfBackedgeTaken; 10784 MaxOrZero = true; 10785 } else { 10786 MaxBECount = computeMaxBECountForLT( 10787 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10788 } 10789 10790 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10791 !isa<SCEVCouldNotCompute>(BECount)) 10792 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10793 10794 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10795 } 10796 10797 ScalarEvolution::ExitLimit 10798 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10799 const Loop *L, bool IsSigned, 10800 bool ControlsExit, bool AllowPredicates) { 10801 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10802 // We handle only IV > Invariant 10803 if (!isLoopInvariant(RHS, L)) 10804 return getCouldNotCompute(); 10805 10806 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10807 if (!IV && AllowPredicates) 10808 // Try to make this an AddRec using runtime tests, in the first X 10809 // iterations of this loop, where X is the SCEV expression found by the 10810 // algorithm below. 10811 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10812 10813 // Avoid weird loops 10814 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10815 return getCouldNotCompute(); 10816 10817 bool NoWrap = ControlsExit && 10818 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW); 10819 10820 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 10821 10822 // Avoid negative or zero stride values 10823 if (!isKnownPositive(Stride)) 10824 return getCouldNotCompute(); 10825 10826 // Avoid proven overflow cases: this will ensure that the backedge taken count 10827 // will not generate any unsigned overflow. Relaxed no-overflow conditions 10828 // exploit NoWrapFlags, allowing to optimize in presence of undefined 10829 // behaviors like the case of C language. 10830 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 10831 return getCouldNotCompute(); 10832 10833 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 10834 : ICmpInst::ICMP_UGT; 10835 10836 const SCEV *Start = IV->getStart(); 10837 const SCEV *End = RHS; 10838 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 10839 // If we know that Start >= RHS in the context of loop, then we know that 10840 // min(RHS, Start) = RHS at this point. 10841 if (isLoopEntryGuardedByCond( 10842 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 10843 End = RHS; 10844 else 10845 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 10846 } 10847 10848 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 10849 10850 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 10851 : getUnsignedRangeMax(Start); 10852 10853 APInt MinStride = IsSigned ? 
getSignedRangeMin(Stride) 10854 : getUnsignedRangeMin(Stride); 10855 10856 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 10857 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 10858 : APInt::getMinValue(BitWidth) + (MinStride - 1); 10859 10860 // Although End can be a MIN expression we estimate MinEnd considering only 10861 // the case End = RHS. This is safe because in the other case (Start - End) 10862 // is zero, leading to a zero maximum backedge taken count. 10863 APInt MinEnd = 10864 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10865 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10866 10867 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 10868 ? BECount 10869 : computeBECount(getConstant(MaxStart - MinEnd), 10870 getConstant(MinStride), false); 10871 10872 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10873 MaxBECount = BECount; 10874 10875 return ExitLimit(BECount, MaxBECount, false, Predicates); 10876 } 10877 10878 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10879 ScalarEvolution &SE) const { 10880 if (Range.isFullSet()) // Infinite loop. 10881 return SE.getCouldNotCompute(); 10882 10883 // If the start is a non-zero constant, shift the range to simplify things. 10884 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10885 if (!SC->getValue()->isZero()) { 10886 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10887 Operands[0] = SE.getZero(SC->getType()); 10888 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10889 getNoWrapFlags(FlagNW)); 10890 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10891 return ShiftedAddRec->getNumIterationsInRange( 10892 Range.subtract(SC->getAPInt()), SE); 10893 // This is strange and shouldn't happen. 10894 return SE.getCouldNotCompute(); 10895 } 10896 10897 // The only time we can solve this is when we have all constant indices. 10898 // Otherwise, we cannot determine the overflow conditions. 10899 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10900 return SE.getCouldNotCompute(); 10901 10902 // Okay at this point we know that all elements of the chrec are constants and 10903 // that the start element is zero. 10904 10905 // First check to see if the range contains zero. If not, the first 10906 // iteration exits. 10907 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10908 if (!Range.contains(APInt(BitWidth, 0))) 10909 return SE.getZero(getType()); 10910 10911 if (isAffine()) { 10912 // If this is an affine expression then we have this situation: 10913 // Solve {0,+,A} in Range === Ax in Range 10914 10915 // We know that zero is in the range. If A is positive then we know that 10916 // the upper value of the range must be the first possible exit value. 10917 // If A is negative then the lower of the range is the last possible loop 10918 // value. Also note that we already checked for a full range. 10919 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10920 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10921 10922 // The exit value should be (End+A)/A. 10923 APInt ExitVal = (End + A).udiv(A); 10924 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10925 10926 // Evaluate at the exit value. If we really did fall out of the valid 10927 // range, then we computed our trip count, otherwise wrap around or other 10928 // things must have happened. 
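    // For example (hypothetical chrec): for {0,+,3} and Range = [0, 10) we
    // get A = 3 and End = 9, so ExitVal = (9 + 3) /u 3 = 4; evaluating the
    // chrec at 4 gives 12, which is outside the range, while at 3 it is
    // still 9, inside the range.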
10929 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10930 if (Range.contains(Val->getValue())) 10931 return SE.getCouldNotCompute(); // Something strange happened 10932 10933 // Ensure that the previous value is in the range. This is a sanity check. 10934 assert(Range.contains( 10935 EvaluateConstantChrecAtConstant(this, 10936 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10937 "Linear scev computation is off in a bad way!"); 10938 return SE.getConstant(ExitValue); 10939 } 10940 10941 if (isQuadratic()) { 10942 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 10943 return SE.getConstant(S.getValue()); 10944 } 10945 10946 return SE.getCouldNotCompute(); 10947 } 10948 10949 const SCEVAddRecExpr * 10950 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 10951 assert(getNumOperands() > 1 && "AddRec with zero step?"); 10952 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 10953 // but in this case we cannot guarantee that the value returned will be an 10954 // AddRec because SCEV does not have a fixed point where it stops 10955 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 10956 // may happen if we reach arithmetic depth limit while simplifying. So we 10957 // construct the returned value explicitly. 10958 SmallVector<const SCEV *, 3> Ops; 10959 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 10960 // (this + Step) is {A+B,+,B+C,+...,+,N}. 10961 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 10962 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 10963 // We know that the last operand is not a constant zero (otherwise it would 10964 // have been popped out earlier). This guarantees us that if the result has 10965 // the same last operand, then it will also not be popped out, meaning that 10966 // the returned value will be an AddRec. 10967 const SCEV *Last = getOperand(getNumOperands() - 1); 10968 assert(!Last->isZero() && "Recurrency with zero step?"); 10969 Ops.push_back(Last); 10970 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 10971 SCEV::FlagAnyWrap)); 10972 } 10973 10974 // Return true when S contains at least an undef value. 10975 static inline bool containsUndefs(const SCEV *S) { 10976 return SCEVExprContains(S, [](const SCEV *S) { 10977 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 10978 return isa<UndefValue>(SU->getValue()); 10979 return false; 10980 }); 10981 } 10982 10983 namespace { 10984 10985 // Collect all steps of SCEV expressions. 10986 struct SCEVCollectStrides { 10987 ScalarEvolution &SE; 10988 SmallVectorImpl<const SCEV *> &Strides; 10989 10990 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 10991 : SE(SE), Strides(S) {} 10992 10993 bool follow(const SCEV *S) { 10994 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 10995 Strides.push_back(AR->getStepRecurrence(SE)); 10996 return true; 10997 } 10998 10999 bool isDone() const { return false; } 11000 }; 11001 11002 // Collect all SCEVUnknown and SCEVMulExpr expressions. 11003 struct SCEVCollectTerms { 11004 SmallVectorImpl<const SCEV *> &Terms; 11005 11006 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 11007 11008 bool follow(const SCEV *S) { 11009 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 11010 isa<SCEVSignExtendExpr>(S)) { 11011 if (!containsUndefs(S)) 11012 Terms.push_back(S); 11013 11014 // Stop recursion: once we collected a term, do not walk its operands. 
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace

/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
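///
/// For example (hypothetical expression), in %n * {%a,+,%m}_loop the stride
/// %m is collected by 1) and the multiplier %n by 2).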
11108 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11109 SmallVectorImpl<const SCEV *> &Terms) { 11110 SmallVector<const SCEV *, 4> Strides; 11111 SCEVCollectStrides StrideCollector(*this, Strides); 11112 visitAll(Expr, StrideCollector); 11113 11114 LLVM_DEBUG({ 11115 dbgs() << "Strides:\n"; 11116 for (const SCEV *S : Strides) 11117 dbgs() << *S << "\n"; 11118 }); 11119 11120 for (const SCEV *S : Strides) { 11121 SCEVCollectTerms TermCollector(Terms); 11122 visitAll(S, TermCollector); 11123 } 11124 11125 LLVM_DEBUG({ 11126 dbgs() << "Terms:\n"; 11127 for (const SCEV *T : Terms) 11128 dbgs() << *T << "\n"; 11129 }); 11130 11131 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11132 visitAll(Expr, MulCollector); 11133 } 11134 11135 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11136 SmallVectorImpl<const SCEV *> &Terms, 11137 SmallVectorImpl<const SCEV *> &Sizes) { 11138 int Last = Terms.size() - 1; 11139 const SCEV *Step = Terms[Last]; 11140 11141 // End of recursion. 11142 if (Last == 0) { 11143 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11144 SmallVector<const SCEV *, 2> Qs; 11145 for (const SCEV *Op : M->operands()) 11146 if (!isa<SCEVConstant>(Op)) 11147 Qs.push_back(Op); 11148 11149 Step = SE.getMulExpr(Qs); 11150 } 11151 11152 Sizes.push_back(Step); 11153 return true; 11154 } 11155 11156 for (const SCEV *&Term : Terms) { 11157 // Normalize the terms before the next call to findArrayDimensionsRec. 11158 const SCEV *Q, *R; 11159 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11160 11161 // Bail out when GCD does not evenly divide one of the terms. 11162 if (!R->isZero()) 11163 return false; 11164 11165 Term = Q; 11166 } 11167 11168 // Remove all SCEVConstants. 11169 Terms.erase( 11170 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11171 Terms.end()); 11172 11173 if (Terms.size() > 0) 11174 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11175 return false; 11176 11177 Sizes.push_back(Step); 11178 return true; 11179 } 11180 11181 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11182 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11183 for (const SCEV *T : Terms) 11184 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11185 return true; 11186 11187 return false; 11188 } 11189 11190 // Return the number of product terms in S. 11191 static inline int numberOfTerms(const SCEV *S) { 11192 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11193 return Expr->getNumOperands(); 11194 return 1; 11195 } 11196 11197 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11198 if (isa<SCEVConstant>(T)) 11199 return nullptr; 11200 11201 if (isa<SCEVUnknown>(T)) 11202 return T; 11203 11204 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11205 SmallVector<const SCEV *, 2> Factors; 11206 for (const SCEV *Op : M->operands()) 11207 if (!isa<SCEVConstant>(Op)) 11208 Factors.push_back(Op); 11209 11210 return SE.getMulExpr(Factors); 11211 } 11212 11213 return T; 11214 } 11215 11216 /// Return the size of an element read or written by Inst. 
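/// For example (illustrative IR), for "store i32 %v, i32* %p" this returns
/// the sizeof expression for i32, i.e. a constant 4 in the effective
/// (pointer-width) type.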
11217 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11218 Type *Ty; 11219 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11220 Ty = Store->getValueOperand()->getType(); 11221 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11222 Ty = Load->getType(); 11223 else 11224 return nullptr; 11225 11226 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11227 return getSizeOfExpr(ETy, Ty); 11228 } 11229 11230 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11231 SmallVectorImpl<const SCEV *> &Sizes, 11232 const SCEV *ElementSize) { 11233 if (Terms.size() < 1 || !ElementSize) 11234 return; 11235 11236 // Early return when Terms do not contain parameters: we do not delinearize 11237 // non parametric SCEVs. 11238 if (!containsParameters(Terms)) 11239 return; 11240 11241 LLVM_DEBUG({ 11242 dbgs() << "Terms:\n"; 11243 for (const SCEV *T : Terms) 11244 dbgs() << *T << "\n"; 11245 }); 11246 11247 // Remove duplicates. 11248 array_pod_sort(Terms.begin(), Terms.end()); 11249 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11250 11251 // Put larger terms first. 11252 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11253 return numberOfTerms(LHS) > numberOfTerms(RHS); 11254 }); 11255 11256 // Try to divide all terms by the element size. If term is not divisible by 11257 // element size, proceed with the original term. 11258 for (const SCEV *&Term : Terms) { 11259 const SCEV *Q, *R; 11260 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11261 if (!Q->isZero()) 11262 Term = Q; 11263 } 11264 11265 SmallVector<const SCEV *, 4> NewTerms; 11266 11267 // Remove constant factors. 11268 for (const SCEV *T : Terms) 11269 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11270 NewTerms.push_back(NewT); 11271 11272 LLVM_DEBUG({ 11273 dbgs() << "Terms after sorting:\n"; 11274 for (const SCEV *T : NewTerms) 11275 dbgs() << *T << "\n"; 11276 }); 11277 11278 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11279 Sizes.clear(); 11280 return; 11281 } 11282 11283 // The last element to be pushed into Sizes is the size of an element. 11284 Sizes.push_back(ElementSize); 11285 11286 LLVM_DEBUG({ 11287 dbgs() << "Sizes:\n"; 11288 for (const SCEV *S : Sizes) 11289 dbgs() << *S << "\n"; 11290 }); 11291 } 11292 11293 void ScalarEvolution::computeAccessFunctions( 11294 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11295 SmallVectorImpl<const SCEV *> &Sizes) { 11296 // Early exit in case this SCEV is not an affine multivariate function. 11297 if (Sizes.empty()) 11298 return; 11299 11300 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11301 if (!AR->isAffine()) 11302 return; 11303 11304 const SCEV *Res = Expr; 11305 int Last = Sizes.size() - 1; 11306 for (int i = Last; i >= 0; i--) { 11307 const SCEV *Q, *R; 11308 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11309 11310 LLVM_DEBUG({ 11311 dbgs() << "Res: " << *Res << "\n"; 11312 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11313 dbgs() << "Res divided by Sizes[i]:\n"; 11314 dbgs() << "Quotient: " << *Q << "\n"; 11315 dbgs() << "Remainder: " << *R << "\n"; 11316 }); 11317 11318 Res = Q; 11319 11320 // Do not record the last subscript corresponding to the size of elements in 11321 // the array. 11322 if (i == Last) { 11323 11324 // Bail out if the remainder is too complex. 
11325 if (isa<SCEVAddRecExpr>(R)) { 11326 Subscripts.clear(); 11327 Sizes.clear(); 11328 return; 11329 } 11330 11331 continue; 11332 } 11333 11334 // Record the access function for the current subscript. 11335 Subscripts.push_back(R); 11336 } 11337 11338 // Also push in last position the remainder of the last division: it will be 11339 // the access function of the innermost dimension. 11340 Subscripts.push_back(Res); 11341 11342 std::reverse(Subscripts.begin(), Subscripts.end()); 11343 11344 LLVM_DEBUG({ 11345 dbgs() << "Subscripts:\n"; 11346 for (const SCEV *S : Subscripts) 11347 dbgs() << *S << "\n"; 11348 }); 11349 } 11350 11351 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11352 /// sizes of an array access. Returns the remainder of the delinearization that 11353 /// is the offset start of the array. The SCEV->delinearize algorithm computes 11354 /// the multiples of SCEV coefficients: that is a pattern matching of sub 11355 /// expressions in the stride and base of a SCEV corresponding to the 11356 /// computation of a GCD (greatest common divisor) of base and stride. When 11357 /// SCEV->delinearize fails, it returns the SCEV unchanged. 11358 /// 11359 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11360 /// 11361 /// void foo(long n, long m, long o, double A[n][m][o]) { 11362 /// 11363 /// for (long i = 0; i < n; i++) 11364 /// for (long j = 0; j < m; j++) 11365 /// for (long k = 0; k < o; k++) 11366 /// A[i][j][k] = 1.0; 11367 /// } 11368 /// 11369 /// the delinearization input is the following AddRec SCEV: 11370 /// 11371 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11372 /// 11373 /// From this SCEV, we are able to say that the base offset of the access is %A 11374 /// because it appears as an offset that does not divide any of the strides in 11375 /// the loops: 11376 /// 11377 /// CHECK: Base offset: %A 11378 /// 11379 /// and then SCEV->delinearize determines the size of some of the dimensions of 11380 /// the array as these are the multiples by which the strides are happening: 11381 /// 11382 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11383 /// 11384 /// Note that the outermost dimension remains of UnknownSize because there are 11385 /// no strides that would help identifying the size of the last dimension: when 11386 /// the array has been statically allocated, one could compute the size of that 11387 /// dimension by dividing the overall size of the array by the size of the known 11388 /// dimensions: %m * %o * 8. 11389 /// 11390 /// Finally delinearize provides the access functions for the array reference 11391 /// that does correspond to A[i][j][k] of the above C testcase: 11392 /// 11393 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11394 /// 11395 /// The testcases are checking the output of a function pass: 11396 /// DelinearizationPass that walks through all loads and stores of a function 11397 /// asking for the SCEV of the memory access with respect to all enclosing 11398 /// loops, calling SCEV->delinearize on that and printing the results. 11399 void ScalarEvolution::delinearize(const SCEV *Expr, 11400 SmallVectorImpl<const SCEV *> &Subscripts, 11401 SmallVectorImpl<const SCEV *> &Sizes, 11402 const SCEV *ElementSize) { 11403 // First step: collect parametric terms. 
11404 SmallVector<const SCEV *, 4> Terms; 11405 collectParametricTerms(Expr, Terms); 11406 11407 if (Terms.empty()) 11408 return; 11409 11410 // Second step: find subscript sizes. 11411 findArrayDimensions(Terms, Sizes, ElementSize); 11412 11413 if (Sizes.empty()) 11414 return; 11415 11416 // Third step: compute the access functions for each subscript. 11417 computeAccessFunctions(Expr, Subscripts, Sizes); 11418 11419 if (Subscripts.empty()) 11420 return; 11421 11422 LLVM_DEBUG({ 11423 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11424 dbgs() << "ArrayDecl[UnknownSize]"; 11425 for (const SCEV *S : Sizes) 11426 dbgs() << "[" << *S << "]"; 11427 11428 dbgs() << "\nArrayRef"; 11429 for (const SCEV *S : Subscripts) 11430 dbgs() << "[" << *S << "]"; 11431 dbgs() << "\n"; 11432 }); 11433 } 11434 11435 bool ScalarEvolution::getIndexExpressionsFromGEP( 11436 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 11437 SmallVectorImpl<int> &Sizes) { 11438 assert(Subscripts.empty() && Sizes.empty() && 11439 "Expected output lists to be empty on entry to this function."); 11440 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 11441 Type *Ty = GEP->getPointerOperandType(); 11442 bool DroppedFirstDim = false; 11443 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 11444 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 11445 if (i == 1) { 11446 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 11447 Ty = PtrTy->getElementType(); 11448 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 11449 Ty = ArrayTy->getElementType(); 11450 } else { 11451 Subscripts.clear(); 11452 Sizes.clear(); 11453 return false; 11454 } 11455 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 11456 if (Const->getValue()->isZero()) { 11457 DroppedFirstDim = true; 11458 continue; 11459 } 11460 Subscripts.push_back(Expr); 11461 continue; 11462 } 11463 11464 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 11465 if (!ArrayTy) { 11466 Subscripts.clear(); 11467 Sizes.clear(); 11468 return false; 11469 } 11470 11471 Subscripts.push_back(Expr); 11472 if (!(DroppedFirstDim && i == 2)) 11473 Sizes.push_back(ArrayTy->getNumElements()); 11474 11475 Ty = ArrayTy->getElementType(); 11476 } 11477 return !Subscripts.empty(); 11478 } 11479 11480 //===----------------------------------------------------------------------===// 11481 // SCEVCallbackVH Class Implementation 11482 //===----------------------------------------------------------------------===// 11483 11484 void ScalarEvolution::SCEVCallbackVH::deleted() { 11485 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11486 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11487 SE->ConstantEvolutionLoopExitValue.erase(PN); 11488 SE->eraseValueFromMap(getValPtr()); 11489 // this now dangles! 11490 } 11491 11492 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11493 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11494 11495 // Forget all the expressions associated with users of the old value, 11496 // so that future queries will recompute the expressions using the new 11497 // value. 11498 Value *Old = getValPtr(); 11499 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11500 SmallPtrSet<User *, 8> Visited; 11501 while (!Worklist.empty()) { 11502 User *U = Worklist.pop_back_val(); 11503 // Deleting the Old value will cause this to dangle. Postpone 11504 // that until everything else is done. 
11505 if (U == Old) 11506 continue; 11507 if (!Visited.insert(U).second) 11508 continue; 11509 if (PHINode *PN = dyn_cast<PHINode>(U)) 11510 SE->ConstantEvolutionLoopExitValue.erase(PN); 11511 SE->eraseValueFromMap(U); 11512 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11513 } 11514 // Delete the Old value. 11515 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11516 SE->ConstantEvolutionLoopExitValue.erase(PN); 11517 SE->eraseValueFromMap(Old); 11518 // this now dangles! 11519 } 11520 11521 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11522 : CallbackVH(V), SE(se) {} 11523 11524 //===----------------------------------------------------------------------===// 11525 // ScalarEvolution Class Implementation 11526 //===----------------------------------------------------------------------===// 11527 11528 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11529 AssumptionCache &AC, DominatorTree &DT, 11530 LoopInfo &LI) 11531 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11532 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11533 LoopDispositions(64), BlockDispositions(64) { 11534 // To use guards for proving predicates, we need to scan every instruction in 11535 // relevant basic blocks, and not just terminators. Doing this is a waste of 11536 // time if the IR does not actually contain any calls to 11537 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11538 // 11539 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11540 // to _add_ guards to the module when there weren't any before, and wants 11541 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11542 // efficient in lieu of being smart in that rather obscure case. 
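  //
  // For reference, a guard call in the IR looks like this (simplified):
  //
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]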
11543 11544 auto *GuardDecl = F.getParent()->getFunction( 11545 Intrinsic::getName(Intrinsic::experimental_guard)); 11546 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11547 } 11548 11549 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11550 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11551 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11552 ValueExprMap(std::move(Arg.ValueExprMap)), 11553 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11554 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11555 PendingMerges(std::move(Arg.PendingMerges)), 11556 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11557 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11558 PredicatedBackedgeTakenCounts( 11559 std::move(Arg.PredicatedBackedgeTakenCounts)), 11560 ConstantEvolutionLoopExitValue( 11561 std::move(Arg.ConstantEvolutionLoopExitValue)), 11562 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11563 LoopDispositions(std::move(Arg.LoopDispositions)), 11564 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11565 BlockDispositions(std::move(Arg.BlockDispositions)), 11566 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11567 SignedRanges(std::move(Arg.SignedRanges)), 11568 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11569 UniquePreds(std::move(Arg.UniquePreds)), 11570 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11571 LoopUsers(std::move(Arg.LoopUsers)), 11572 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11573 FirstUnknown(Arg.FirstUnknown) { 11574 Arg.FirstUnknown = nullptr; 11575 } 11576 11577 ScalarEvolution::~ScalarEvolution() { 11578 // Iterate through all the SCEVUnknown instances and call their 11579 // destructors, so that they release their references to their values. 11580 for (SCEVUnknown *U = FirstUnknown; U;) { 11581 SCEVUnknown *Tmp = U; 11582 U = U->Next; 11583 Tmp->~SCEVUnknown(); 11584 } 11585 FirstUnknown = nullptr; 11586 11587 ExprValueMap.clear(); 11588 ValueExprMap.clear(); 11589 HasRecMap.clear(); 11590 11591 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11592 // that a loop had multiple computable exits. 
11593 for (auto &BTCI : BackedgeTakenCounts) 11594 BTCI.second.clear(); 11595 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11596 BTCI.second.clear(); 11597 11598 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11599 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11600 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11601 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11602 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11603 } 11604 11605 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11606 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11607 } 11608 11609 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11610 const Loop *L) { 11611 // Print all inner loops first 11612 for (Loop *I : *L) 11613 PrintLoopInfo(OS, SE, I); 11614 11615 OS << "Loop "; 11616 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11617 OS << ": "; 11618 11619 SmallVector<BasicBlock *, 8> ExitingBlocks; 11620 L->getExitingBlocks(ExitingBlocks); 11621 if (ExitingBlocks.size() != 1) 11622 OS << "<multiple exits> "; 11623 11624 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 11625 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 11626 else 11627 OS << "Unpredictable backedge-taken count.\n"; 11628 11629 if (ExitingBlocks.size() > 1) 11630 for (BasicBlock *ExitingBlock : ExitingBlocks) { 11631 OS << " exit count for " << ExitingBlock->getName() << ": " 11632 << *SE->getExitCount(L, ExitingBlock) << "\n"; 11633 } 11634 11635 OS << "Loop "; 11636 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11637 OS << ": "; 11638 11639 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 11640 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 11641 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11642 OS << ", actual taken count either this or zero."; 11643 } else { 11644 OS << "Unpredictable max backedge-taken count. "; 11645 } 11646 11647 OS << "\n" 11648 "Loop "; 11649 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11650 OS << ": "; 11651 11652 SCEVUnionPredicate Pred; 11653 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11654 if (!isa<SCEVCouldNotCompute>(PBT)) { 11655 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11656 OS << " Predicates:\n"; 11657 Pred.print(OS, 4); 11658 } else { 11659 OS << "Unpredictable predicated backedge-taken count. "; 11660 } 11661 OS << "\n"; 11662 11663 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11664 OS << "Loop "; 11665 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11666 OS << ": "; 11667 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11668 } 11669 } 11670 11671 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11672 switch (LD) { 11673 case ScalarEvolution::LoopVariant: 11674 return "Variant"; 11675 case ScalarEvolution::LoopInvariant: 11676 return "Invariant"; 11677 case ScalarEvolution::LoopComputable: 11678 return "Computable"; 11679 } 11680 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11681 } 11682 11683 void ScalarEvolution::print(raw_ostream &OS) const { 11684 // ScalarEvolution's implementation of the print method is to print 11685 // out SCEV values of all instructions that are interesting. Doing 11686 // this potentially causes it to create new SCEV objects though, 11687 // which technically conflicts with the const qualifier. 
This isn't 11688 // observable from outside the class though, so casting away the 11689 // const isn't dangerous. 11690 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11691 11692 if (ClassifyExpressions) { 11693 OS << "Classifying expressions for: "; 11694 F.printAsOperand(OS, /*PrintType=*/false); 11695 OS << "\n"; 11696 for (Instruction &I : instructions(F)) 11697 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11698 OS << I << '\n'; 11699 OS << " --> "; 11700 const SCEV *SV = SE.getSCEV(&I); 11701 SV->print(OS); 11702 if (!isa<SCEVCouldNotCompute>(SV)) { 11703 OS << " U: "; 11704 SE.getUnsignedRange(SV).print(OS); 11705 OS << " S: "; 11706 SE.getSignedRange(SV).print(OS); 11707 } 11708 11709 const Loop *L = LI.getLoopFor(I.getParent()); 11710 11711 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11712 if (AtUse != SV) { 11713 OS << " --> "; 11714 AtUse->print(OS); 11715 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11716 OS << " U: "; 11717 SE.getUnsignedRange(AtUse).print(OS); 11718 OS << " S: "; 11719 SE.getSignedRange(AtUse).print(OS); 11720 } 11721 } 11722 11723 if (L) { 11724 OS << "\t\t" "Exits: "; 11725 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11726 if (!SE.isLoopInvariant(ExitValue, L)) { 11727 OS << "<<Unknown>>"; 11728 } else { 11729 OS << *ExitValue; 11730 } 11731 11732 bool First = true; 11733 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11734 if (First) { 11735 OS << "\t\t" "LoopDispositions: { "; 11736 First = false; 11737 } else { 11738 OS << ", "; 11739 } 11740 11741 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11742 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11743 } 11744 11745 for (auto *InnerL : depth_first(L)) { 11746 if (InnerL == L) 11747 continue; 11748 if (First) { 11749 OS << "\t\t" "LoopDispositions: { "; 11750 First = false; 11751 } else { 11752 OS << ", "; 11753 } 11754 11755 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11756 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11757 } 11758 11759 OS << " }"; 11760 } 11761 11762 OS << "\n"; 11763 } 11764 } 11765 11766 OS << "Determining loop execution counts for: "; 11767 F.printAsOperand(OS, /*PrintType=*/false); 11768 OS << "\n"; 11769 for (Loop *I : LI) 11770 PrintLoopInfo(OS, &SE, I); 11771 } 11772 11773 ScalarEvolution::LoopDisposition 11774 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11775 auto &Values = LoopDispositions[S]; 11776 for (auto &V : Values) { 11777 if (V.getPointer() == L) 11778 return V.getInt(); 11779 } 11780 Values.emplace_back(L, LoopVariant); 11781 LoopDisposition D = computeLoopDisposition(S, L); 11782 auto &Values2 = LoopDispositions[S]; 11783 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11784 if (V.getPointer() == L) { 11785 V.setInt(D); 11786 break; 11787 } 11788 } 11789 return D; 11790 } 11791 11792 ScalarEvolution::LoopDisposition 11793 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11794 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11795 case scConstant: 11796 return LoopInvariant; 11797 case scTruncate: 11798 case scZeroExtend: 11799 case scSignExtend: 11800 return getLoopDisposition(cast<SCEVIntegralCastExpr>(S)->getOperand(), L); 11801 case scAddRecExpr: { 11802 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11803 11804 // If L is the addrec's loop, it's computable. 
11805 if (AR->getLoop() == L) 11806 return LoopComputable; 11807 11808 // Add recurrences are never invariant in the function-body (null loop). 11809 if (!L) 11810 return LoopVariant; 11811 11812 // Everything that is not defined at loop entry is variant. 11813 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11814 return LoopVariant; 11815 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11816 " dominate the contained loop's header?"); 11817 11818 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11819 if (AR->getLoop()->contains(L)) 11820 return LoopInvariant; 11821 11822 // This recurrence is variant w.r.t. L if any of its operands 11823 // are variant. 11824 for (auto *Op : AR->operands()) 11825 if (!isLoopInvariant(Op, L)) 11826 return LoopVariant; 11827 11828 // Otherwise it's loop-invariant. 11829 return LoopInvariant; 11830 } 11831 case scAddExpr: 11832 case scMulExpr: 11833 case scUMaxExpr: 11834 case scSMaxExpr: 11835 case scUMinExpr: 11836 case scSMinExpr: { 11837 bool HasVarying = false; 11838 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11839 LoopDisposition D = getLoopDisposition(Op, L); 11840 if (D == LoopVariant) 11841 return LoopVariant; 11842 if (D == LoopComputable) 11843 HasVarying = true; 11844 } 11845 return HasVarying ? LoopComputable : LoopInvariant; 11846 } 11847 case scUDivExpr: { 11848 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11849 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11850 if (LD == LoopVariant) 11851 return LoopVariant; 11852 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11853 if (RD == LoopVariant) 11854 return LoopVariant; 11855 return (LD == LoopInvariant && RD == LoopInvariant) ? 11856 LoopInvariant : LoopComputable; 11857 } 11858 case scUnknown: 11859 // All non-instruction values are loop invariant. All instructions are loop 11860 // invariant if they are not contained in the specified loop. 11861 // Instructions are never considered invariant in the function body 11862 // (null loop) because they are defined within the "loop". 11863 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11864 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11865 return LoopInvariant; 11866 case scCouldNotCompute: 11867 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11868 } 11869 llvm_unreachable("Unknown SCEV kind!"); 11870 } 11871 11872 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11873 return getLoopDisposition(S, L) == LoopInvariant; 11874 } 11875 11876 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11877 return getLoopDisposition(S, L) == LoopComputable; 11878 } 11879 11880 ScalarEvolution::BlockDisposition 11881 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11882 auto &Values = BlockDispositions[S]; 11883 for (auto &V : Values) { 11884 if (V.getPointer() == BB) 11885 return V.getInt(); 11886 } 11887 Values.emplace_back(BB, DoesNotDominateBlock); 11888 BlockDisposition D = computeBlockDisposition(S, BB); 11889 auto &Values2 = BlockDispositions[S]; 11890 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11891 if (V.getPointer() == BB) { 11892 V.setInt(D); 11893 break; 11894 } 11895 } 11896 return D; 11897 } 11898 11899 ScalarEvolution::BlockDisposition 11900 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11901 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11902 case scConstant: 11903 return ProperlyDominatesBlock; 11904 case scTruncate: 11905 case scZeroExtend: 11906 case scSignExtend: 11907 return getBlockDisposition(cast<SCEVIntegralCastExpr>(S)->getOperand(), BB); 11908 case scAddRecExpr: { 11909 // This uses a "dominates" query instead of "properly dominates" query 11910 // to test for proper dominance too, because the instruction which 11911 // produces the addrec's value is a PHI, and a PHI effectively properly 11912 // dominates its entire containing block. 11913 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11914 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11915 return DoesNotDominateBlock; 11916 11917 // Fall through into SCEVNAryExpr handling. 11918 LLVM_FALLTHROUGH; 11919 } 11920 case scAddExpr: 11921 case scMulExpr: 11922 case scUMaxExpr: 11923 case scSMaxExpr: 11924 case scUMinExpr: 11925 case scSMinExpr: { 11926 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11927 bool Proper = true; 11928 for (const SCEV *NAryOp : NAry->operands()) { 11929 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11930 if (D == DoesNotDominateBlock) 11931 return DoesNotDominateBlock; 11932 if (D == DominatesBlock) 11933 Proper = false; 11934 } 11935 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11936 } 11937 case scUDivExpr: { 11938 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11939 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11940 BlockDisposition LD = getBlockDisposition(LHS, BB); 11941 if (LD == DoesNotDominateBlock) 11942 return DoesNotDominateBlock; 11943 BlockDisposition RD = getBlockDisposition(RHS, BB); 11944 if (RD == DoesNotDominateBlock) 11945 return DoesNotDominateBlock; 11946 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
11947 ProperlyDominatesBlock : DominatesBlock; 11948 } 11949 case scUnknown: 11950 if (Instruction *I = 11951 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 11952 if (I->getParent() == BB) 11953 return DominatesBlock; 11954 if (DT.properlyDominates(I->getParent(), BB)) 11955 return ProperlyDominatesBlock; 11956 return DoesNotDominateBlock; 11957 } 11958 return ProperlyDominatesBlock; 11959 case scCouldNotCompute: 11960 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11961 } 11962 llvm_unreachable("Unknown SCEV kind!"); 11963 } 11964 11965 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 11966 return getBlockDisposition(S, BB) >= DominatesBlock; 11967 } 11968 11969 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 11970 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 11971 } 11972 11973 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 11974 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 11975 } 11976 11977 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 11978 auto IsS = [&](const SCEV *X) { return S == X; }; 11979 auto ContainsS = [&](const SCEV *X) { 11980 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 11981 }; 11982 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 11983 } 11984 11985 void 11986 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 11987 ValuesAtScopes.erase(S); 11988 LoopDispositions.erase(S); 11989 BlockDispositions.erase(S); 11990 UnsignedRanges.erase(S); 11991 SignedRanges.erase(S); 11992 ExprValueMap.erase(S); 11993 HasRecMap.erase(S); 11994 MinTrailingZerosCache.erase(S); 11995 11996 for (auto I = PredicatedSCEVRewrites.begin(); 11997 I != PredicatedSCEVRewrites.end();) { 11998 std::pair<const SCEV *, const Loop *> Entry = I->first; 11999 if (Entry.first == S) 12000 PredicatedSCEVRewrites.erase(I++); 12001 else 12002 ++I; 12003 } 12004 12005 auto RemoveSCEVFromBackedgeMap = 12006 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 12007 for (auto I = Map.begin(), E = Map.end(); I != E;) { 12008 BackedgeTakenInfo &BEInfo = I->second; 12009 if (BEInfo.hasOperand(S, this)) { 12010 BEInfo.clear(); 12011 Map.erase(I++); 12012 } else 12013 ++I; 12014 } 12015 }; 12016 12017 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 12018 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 12019 } 12020 12021 void 12022 ScalarEvolution::getUsedLoops(const SCEV *S, 12023 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 12024 struct FindUsedLoops { 12025 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 12026 : LoopsUsed(LoopsUsed) {} 12027 SmallPtrSetImpl<const Loop *> &LoopsUsed; 12028 bool follow(const SCEV *S) { 12029 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 12030 LoopsUsed.insert(AR->getLoop()); 12031 return true; 12032 } 12033 12034 bool isDone() const { return false; } 12035 }; 12036 12037 FindUsedLoops F(LoopsUsed); 12038 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 12039 } 12040 12041 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 12042 SmallPtrSet<const Loop *, 8> LoopsUsed; 12043 getUsedLoops(S, LoopsUsed); 12044 for (auto *L : LoopsUsed) 12045 LoopUsers[L].push_back(S); 12046 } 12047 12048 void ScalarEvolution::verify() const { 12049 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12050 ScalarEvolution SE2(F, TLI, AC, DT, LI); 12051 12052 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 12053 12054 // Map's SCEV expressions from one 
ScalarEvolution "universe" to another. 12055 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 12056 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 12057 12058 const SCEV *visitConstant(const SCEVConstant *Constant) { 12059 return SE.getConstant(Constant->getAPInt()); 12060 } 12061 12062 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12063 return SE.getUnknown(Expr->getValue()); 12064 } 12065 12066 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 12067 return SE.getCouldNotCompute(); 12068 } 12069 }; 12070 12071 SCEVMapper SCM(SE2); 12072 12073 while (!LoopStack.empty()) { 12074 auto *L = LoopStack.pop_back_val(); 12075 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 12076 12077 auto *CurBECount = SCM.visit( 12078 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 12079 auto *NewBECount = SE2.getBackedgeTakenCount(L); 12080 12081 if (CurBECount == SE2.getCouldNotCompute() || 12082 NewBECount == SE2.getCouldNotCompute()) { 12083 // NB! This situation is legal, but is very suspicious -- whatever pass 12084 // changed the loop to make a trip count go from could not compute to 12085 // computable or vice-versa *should have* invalidated SCEV. However, we 12086 // choose not to assert here (for now) since we don't want false 12087 // positives. 12088 continue; 12089 } 12090 12091 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 12092 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 12093 // not propagate undef aggressively). This means we can (and do) fail 12094 // verification in cases where a transform makes the trip count of a loop 12095 // go from "undef" to "undef+1" (say). The transform is fine, since in 12096 // both cases the loop iterates "undef" times, but SCEV thinks we 12097 // increased the trip count of the loop by 1 incorrectly. 12098 continue; 12099 } 12100 12101 if (SE.getTypeSizeInBits(CurBECount->getType()) > 12102 SE.getTypeSizeInBits(NewBECount->getType())) 12103 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 12104 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 12105 SE.getTypeSizeInBits(NewBECount->getType())) 12106 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 12107 12108 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 12109 12110 // Unless VerifySCEVStrict is set, we only compare constant deltas. 12111 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 12112 dbgs() << "Trip Count for " << *L << " Changed!\n"; 12113 dbgs() << "Old: " << *CurBECount << "\n"; 12114 dbgs() << "New: " << *NewBECount << "\n"; 12115 dbgs() << "Delta: " << *Delta << "\n"; 12116 std::abort(); 12117 } 12118 } 12119 12120 // Collect all valid loops currently in LoopInfo. 12121 SmallPtrSet<Loop *, 32> ValidLoops; 12122 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end()); 12123 while (!Worklist.empty()) { 12124 Loop *L = Worklist.pop_back_val(); 12125 if (ValidLoops.contains(L)) 12126 continue; 12127 ValidLoops.insert(L); 12128 Worklist.append(L->begin(), L->end()); 12129 } 12130 // Check for SCEV expressions referencing invalid/deleted loops. 
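// For example (a hypothetical scenario, not a specific test case): a pass
// that deletes a fully unrolled loop %L without calling forgetLoop(%L) can
// leave a cached ValueExprMap entry whose SCEV is {0,+,1}<%L>; the assert
// below is what would catch such a stale reference.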
12131 for (auto &KV : ValueExprMap) { 12132 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12133 if (!AR) 12134 continue; 12135 assert(ValidLoops.contains(AR->getLoop()) && 12136 "AddRec references invalid loop"); 12137 } 12138 } 12139 12140 bool ScalarEvolution::invalidate( 12141 Function &F, const PreservedAnalyses &PA, 12142 FunctionAnalysisManager::Invalidator &Inv) { 12143 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12144 // of its dependencies is invalidated. 12145 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12146 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12147 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12148 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12149 Inv.invalidate<LoopAnalysis>(F, PA); 12150 } 12151 12152 AnalysisKey ScalarEvolutionAnalysis::Key; 12153 12154 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12155 FunctionAnalysisManager &AM) { 12156 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12157 AM.getResult<AssumptionAnalysis>(F), 12158 AM.getResult<DominatorTreeAnalysis>(F), 12159 AM.getResult<LoopAnalysis>(F)); 12160 } 12161 12162 PreservedAnalyses 12163 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12164 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12165 return PreservedAnalyses::all(); 12166 } 12167 12168 PreservedAnalyses 12169 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12170 // For compatibility with opt's -analyze feature under legacy pass manager 12171 // which was not ported to NPM. This keeps tests using 12172 // update_analyze_test_checks.py working. 12173 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12174 << F.getName() << "':\n"; 12175 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12176 return PreservedAnalyses::all(); 12177 } 12178 12179 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12180 "Scalar Evolution Analysis", false, true) 12181 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12182 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12183 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12184 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12185 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12186 "Scalar Evolution Analysis", false, true) 12187 12188 char ScalarEvolutionWrapperPass::ID = 0; 12189 12190 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12191 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12192 } 12193 12194 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12195 SE.reset(new ScalarEvolution( 12196 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12197 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12198 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12199 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12200 return false; 12201 } 12202 12203 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12204 12205 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12206 SE->print(OS); 12207 } 12208 12209 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12210 if (!VerifySCEV) 12211 return; 12212 12213 SE->verify(); 12214 } 12215 12216 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12217 AU.setPreservesAll(); 12218 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12219 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12220 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12221 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12222 } 12223 12224 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12225 const SCEV *RHS) { 12226 FoldingSetNodeID ID; 12227 assert(LHS->getType() == RHS->getType() && 12228 "Type mismatch between LHS and RHS"); 12229 // Unique this node based on the arguments 12230 ID.AddInteger(SCEVPredicate::P_Equal); 12231 ID.AddPointer(LHS); 12232 ID.AddPointer(RHS); 12233 void *IP = nullptr; 12234 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12235 return S; 12236 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12237 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12238 UniquePreds.InsertNode(Eq, IP); 12239 return Eq; 12240 } 12241 12242 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12243 const SCEVAddRecExpr *AR, 12244 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12245 FoldingSetNodeID ID; 12246 // Unique this node based on the arguments 12247 ID.AddInteger(SCEVPredicate::P_Wrap); 12248 ID.AddPointer(AR); 12249 ID.AddInteger(AddedFlags); 12250 void *IP = nullptr; 12251 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12252 return S; 12253 auto *OF = new (SCEVAllocator) 12254 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12255 UniquePreds.InsertNode(OF, IP); 12256 return OF; 12257 } 12258 12259 namespace { 12260 12261 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12262 public: 12263 12264 /// Rewrites \p S in the context of a loop L and the SCEV predication 12265 /// infrastructure. 12266 /// 12267 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12268 /// equivalences present in \p Pred. 12269 /// 12270 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12271 /// \p NewPreds such that the result will be an AddRecExpr. 12272 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12273 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12274 SCEVUnionPredicate *Pred) { 12275 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12276 return Rewriter.visit(S); 12277 } 12278 12279 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12280 if (Pred) { 12281 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12282 for (auto *Pred : ExprPreds) 12283 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12284 if (IPred->getLHS() == Expr) 12285 return IPred->getRHS(); 12286 } 12287 return convertToAddRecWithPreds(Expr); 12288 } 12289 12290 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12291 const SCEV *Operand = visit(Expr->getOperand()); 12292 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12293 if (AR && AR->getLoop() == L && AR->isAffine()) { 12294 // This couldn't be folded because the operand didn't have the nuw 12295 // flag. Add the nusw flag as an assumption that we could make. 
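// For example (a sketch with a hypothetical %iv): given
//   %iv = {%start,+,%step}<%L> with no <nuw>,
// zext(%iv) cannot be folded into an AddRec directly; under an assumed
// <nusw> wrap predicate it becomes {zext(%start),+,sext(%step)}<%L>,
// which is exactly the rewrite performed below.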
12296 const SCEV *Step = AR->getStepRecurrence(SE); 12297 Type *Ty = Expr->getType(); 12298 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12299 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12300 SE.getSignExtendExpr(Step, Ty), L, 12301 AR->getNoWrapFlags()); 12302 } 12303 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12304 } 12305 12306 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12307 const SCEV *Operand = visit(Expr->getOperand()); 12308 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12309 if (AR && AR->getLoop() == L && AR->isAffine()) { 12310 // This couldn't be folded because the operand didn't have the nsw 12311 // flag. Add the nssw flag as an assumption that we could make. 12312 const SCEV *Step = AR->getStepRecurrence(SE); 12313 Type *Ty = Expr->getType(); 12314 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12315 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12316 SE.getSignExtendExpr(Step, Ty), L, 12317 AR->getNoWrapFlags()); 12318 } 12319 return SE.getSignExtendExpr(Operand, Expr->getType()); 12320 } 12321 12322 private: 12323 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12324 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12325 SCEVUnionPredicate *Pred) 12326 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12327 12328 bool addOverflowAssumption(const SCEVPredicate *P) { 12329 if (!NewPreds) { 12330 // Check if we've already made this assumption. 12331 return Pred && Pred->implies(P); 12332 } 12333 NewPreds->insert(P); 12334 return true; 12335 } 12336 12337 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12338 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12339 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12340 return addOverflowAssumption(A); 12341 } 12342 12343 // If \p Expr represents a PHINode, we try to see if it can be represented 12344 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12345 // to add this predicate as a runtime overflow check, we return the AddRec. 12346 // If \p Expr does not meet these conditions (is not a PHI node, or we 12347 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12348 // return \p Expr. 12349 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12350 if (!isa<PHINode>(Expr->getValue())) 12351 return Expr; 12352 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12353 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12354 if (!PredicatedRewrite) 12355 return Expr; 12356 for (auto *P : PredicatedRewrite->second){ 12357 // Wrap predicates from outer loops are not supported. 
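// (createAddRecFromPHIWithCasts may return predicates over AddRecs of
// enclosing loops; in that case we conservatively give up and return the
// original expression.)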
12358 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12359 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12360 if (L != AR->getLoop()) 12361 return Expr; 12362 } 12363 if (!addOverflowAssumption(P)) 12364 return Expr; 12365 } 12366 return PredicatedRewrite->first; 12367 } 12368 12369 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12370 SCEVUnionPredicate *Pred; 12371 const Loop *L; 12372 }; 12373 12374 } // end anonymous namespace 12375 12376 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12377 SCEVUnionPredicate &Preds) { 12378 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12379 } 12380 12381 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12382 const SCEV *S, const Loop *L, 12383 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12384 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12385 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 12386 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 12387 12388 if (!AddRec) 12389 return nullptr; 12390 12391 // Since the transformation was successful, we can now transfer the SCEV 12392 // predicates. 12393 for (auto *P : TransformPreds) 12394 Preds.insert(P); 12395 12396 return AddRec; 12397 } 12398 12399 /// SCEV predicates 12400 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 12401 SCEVPredicateKind Kind) 12402 : FastID(ID), Kind(Kind) {} 12403 12404 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 12405 const SCEV *LHS, const SCEV *RHS) 12406 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 12407 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 12408 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 12409 } 12410 12411 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 12412 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 12413 12414 if (!Op) 12415 return false; 12416 12417 return Op->LHS == LHS && Op->RHS == RHS; 12418 } 12419 12420 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 12421 12422 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 12423 12424 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 12425 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 12426 } 12427 12428 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 12429 const SCEVAddRecExpr *AR, 12430 IncrementWrapFlags Flags) 12431 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 12432 12433 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 12434 12435 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 12436 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 12437 12438 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 12439 } 12440 12441 bool SCEVWrapPredicate::isAlwaysTrue() const { 12442 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 12443 IncrementWrapFlags IFlags = Flags; 12444 12445 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 12446 IFlags = clearFlags(IFlags, IncrementNSSW); 12447 12448 return IFlags == IncrementAnyWrap; 12449 } 12450 12451 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 12452 OS.indent(Depth) << *getExpr() << " Added Flags: "; 12453 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 12454 OS << "<nusw>"; 12455 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 12456 OS << "<nssw>"; 12457 OS << "\n"; 12458 } 12459 12460 SCEVWrapPredicate::IncrementWrapFlags 12461 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 12462 ScalarEvolution &SE) { 12463 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 12464 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 12465 12466 // We can safely transfer the NSW flag as NSSW. 12467 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 12468 ImpliedFlags = IncrementNSSW; 12469 12470 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 12471 // If the increment is non-negative, the SCEV NUW flag will also imply the 12472 // WrapPredicate NUSW flag. 12473 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 12474 if (Step->getValue()->getValue().isNonNegative()) 12475 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 12476 } 12477 12478 return ImpliedFlags; 12479 } 12480 12481 /// Union predicates don't get cached, so create a dummy set ID for them. 12482 SCEVUnionPredicate::SCEVUnionPredicate() 12483 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 12484 12485 bool SCEVUnionPredicate::isAlwaysTrue() const { 12486 return all_of(Preds, 12487 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 12488 } 12489 12490 ArrayRef<const SCEVPredicate *> 12491 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 12492 auto I = SCEVToPreds.find(Expr); 12493 if (I == SCEVToPreds.end()) 12494 return ArrayRef<const SCEVPredicate *>(); 12495 return I->second; 12496 } 12497 12498 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 12499 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 12500 return all_of(Set->Preds, 12501 [this](const SCEVPredicate *I) { return this->implies(I); }); 12502 12503 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 12504 if (ScevPredsIt == SCEVToPreds.end()) 12505 return false; 12506 auto &SCEVPreds = ScevPredsIt->second; 12507 12508 return any_of(SCEVPreds, 12509 [N](const SCEVPredicate *I) { return I->implies(N); }); 12510 } 12511 12512 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 12513 12514 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 12515 for (auto Pred : Preds) 12516 Pred->print(OS, Depth); 12517 } 12518 12519 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 12520 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 12521 for (auto Pred : Set->Preds) 12522 add(Pred); 12523 return; 12524 } 12525 12526 if (implies(N)) 12527 return; 12528 12529 const SCEV *Key = N->getExpr(); 12530 assert(Key && "Only SCEVUnionPredicate doesn't have an " 12531 "associated expression!"); 12532 12533 SCEVToPreds[Key].push_back(N); 12534 Preds.push_back(N); 12535 } 12536 12537 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 12538 Loop &L) 12539 : SE(SE), L(L) {} 12540 12541 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 12542 const SCEV *Expr = SE.getSCEV(V); 12543 RewriteEntry &Entry = RewriteMap[Expr]; 12544 12545 // If we already have an entry and the version matches, return it. 12546 if (Entry.second && Generation == Entry.first) 12547 return Entry.second; 12548 12549 // We found an entry but it's stale. Rewrite the stale entry 12550 // according to the current predicate. 
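// For example (sketch): if the entry was cached at generation 3 and adding
// a predicate has since bumped Generation to 4, we start from the
// generation-3 result and rewrite it under the enlarged predicate set,
// instead of starting over from SE.getSCEV(V).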
if (Entry.second) 12552 Expr = Entry.second; 12553 12554 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 12555 Entry = {Generation, NewSCEV}; 12556 12557 return NewSCEV; 12558 } 12559 12560 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 12561 if (!BackedgeCount) { 12562 SCEVUnionPredicate BackedgePred; 12563 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 12564 addPredicate(BackedgePred); 12565 } 12566 return BackedgeCount; 12567 } 12568 12569 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 12570 if (Preds.implies(&Pred)) 12571 return; 12572 Preds.add(&Pred); 12573 updateGeneration(); 12574 } 12575 12576 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 12577 return Preds; 12578 } 12579 12580 void PredicatedScalarEvolution::updateGeneration() { 12581 // If the generation number wrapped, recompute everything. 12582 if (++Generation == 0) { 12583 for (auto &II : RewriteMap) { 12584 const SCEV *Rewritten = II.second.second; 12585 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 12586 } 12587 } 12588 } 12589 12590 void PredicatedScalarEvolution::setNoOverflow( 12591 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12592 const SCEV *Expr = getSCEV(V); 12593 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12594 12595 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 12596 12597 // Clear the statically implied flags. 12598 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 12599 addPredicate(*SE.getWrapPredicate(AR, Flags)); 12600 12601 auto II = FlagsMap.insert({V, Flags}); 12602 if (!II.second) 12603 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 12604 } 12605 12606 bool PredicatedScalarEvolution::hasNoOverflow( 12607 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 12608 const SCEV *Expr = getSCEV(V); 12609 const auto *AR = cast<SCEVAddRecExpr>(Expr); 12610 12611 Flags = SCEVWrapPredicate::clearFlags( 12612 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 12613 12614 auto II = FlagsMap.find(V); 12615 12616 if (II != FlagsMap.end()) 12617 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 12618 12619 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 12620 } 12621 12622 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 12623 const SCEV *Expr = this->getSCEV(V); 12624 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 12625 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 12626 12627 if (!New) 12628 return nullptr; 12629 12630 for (auto *P : NewPreds) 12631 Preds.add(P); 12632 12633 updateGeneration(); 12634 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 12635 return New; 12636 } 12637 12638 PredicatedScalarEvolution::PredicatedScalarEvolution( 12639 const PredicatedScalarEvolution &Init) 12640 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 12641 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 12642 for (auto I : Init.FlagsMap) 12643 FlagsMap.insert(I); 12644 } 12645 12646 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 12647 // For each block. 12648 for (auto *BB : L.getBlocks()) 12649 for (auto &I : *BB) { 12650 if (!SE.isSCEVable(I.getType())) 12651 continue; 12652 12653 auto *Expr = SE.getSCEV(&I); 12654 auto II = RewriteMap.find(Expr); 12655 12656 if (II == RewriteMap.end()) 12657 continue; 12658 12659 // Don't print things that are not interesting. 
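// (That is, expressions that the current predicates leave unchanged.)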
12660 if (II->second.second == Expr) 12661 continue; 12662 12663 OS.indent(Depth) << "[PSE]" << I << ":\n"; 12664 OS.indent(Depth + 2) << *Expr << "\n"; 12665 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 12666 } 12667 } 12668 12669 // Match the mathematical pattern A - (A / B) * B, where A and B can be 12670 // arbitrary expressions. 12671 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 12672 // 4, A / B becomes X / 8). 12673 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 12674 const SCEV *&RHS) { 12675 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 12676 if (Add == nullptr || Add->getNumOperands() != 2) 12677 return false; 12678 12679 const SCEV *A = Add->getOperand(1); 12680 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 12681 12682 if (Mul == nullptr) 12683 return false; 12684 12685 const auto MatchURemWithDivisor = [&](const SCEV *B) { 12686 // (SomeExpr + (-(SomeExpr / B) * B)). 12687 if (Expr == getURemExpr(A, B)) { 12688 LHS = A; 12689 RHS = B; 12690 return true; 12691 } 12692 return false; 12693 }; 12694 12695 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 12696 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 12697 return MatchURemWithDivisor(Mul->getOperand(1)) || 12698 MatchURemWithDivisor(Mul->getOperand(2)); 12699 12700 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 12701 if (Mul->getNumOperands() == 2) 12702 return MatchURemWithDivisor(Mul->getOperand(1)) || 12703 MatchURemWithDivisor(Mul->getOperand(0)) || 12704 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 12705 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 12706 return false; 12707 } 12708 12709 const SCEV* ScalarEvolution::computeMaxBackedgeTakenCount(const Loop *L) { 12710 SmallVector<BasicBlock*, 16> ExitingBlocks; 12711 L->getExitingBlocks(ExitingBlocks); 12712 12713 // Form an expression for the maximum exit count possible for this loop. We 12714 // merge the max and exact information to approximate a version of 12715 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. 12716 SmallVector<const SCEV*, 4> ExitCounts; 12717 for (BasicBlock *ExitingBB : ExitingBlocks) { 12718 const SCEV *ExitCount = getExitCount(L, ExitingBB); 12719 if (isa<SCEVCouldNotCompute>(ExitCount)) 12720 ExitCount = getExitCount(L, ExitingBB, 12721 ScalarEvolution::ConstantMaximum); 12722 if (!isa<SCEVCouldNotCompute>(ExitCount)) { 12723 assert(DT.dominates(ExitingBB, L->getLoopLatch()) && 12724 "We should only have known counts for exiting blocks that " 12725 "dominate latch!"); 12726 ExitCounts.push_back(ExitCount); 12727 } 12728 } 12729 if (ExitCounts.empty()) 12730 return getCouldNotCompute(); 12731 return getUMinFromMismatchedTypes(ExitCounts); 12732 } 12733 12734 /// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown 12735 /// components following the Map (Value -> SCEV)), but skips AddRecExpr because 12736 /// we cannot guarantee that the replacement is loop invariant in the loop of 12737 /// the AddRec. 
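/// For example (a sketch): with Map = { %n -> umin(%n, 41) }, the expression
/// (1 + %n) rewrites to (1 + umin(%n, 41)), while an AddRec such as
/// {%n,+,1}<%L> is returned unchanged, since rewriting its operands could
/// introduce values that are not invariant in %L.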
12738 class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> { 12739 ValueToSCEVMapTy &Map; 12740 12741 public: 12742 SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M) 12743 : SCEVRewriteVisitor(SE), Map(M) {} 12744 12745 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 12746 12747 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12748 auto I = Map.find(Expr->getValue()); 12749 if (I == Map.end()) 12750 return Expr; 12751 return I->second; 12752 } 12753 }; 12754 12755 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) { 12756 auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS, 12757 const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) { 12758 if (!isa<SCEVUnknown>(LHS)) { 12759 std::swap(LHS, RHS); 12760 Predicate = CmpInst::getSwappedPredicate(Predicate); 12761 } 12762 12763 // For now, limit to conditions that provide information about unknown 12764 // expressions. 12765 auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS); 12766 if (!LHSUnknown) 12767 return; 12768 12769 // TODO: use information from more predicates. 12770 switch (Predicate) { 12771 case CmpInst::ICMP_ULT: { 12772 if (!containsAddRecurrence(RHS)) { 12773 const SCEV *Base = LHS; 12774 auto I = RewriteMap.find(LHSUnknown->getValue()); 12775 if (I != RewriteMap.end()) 12776 Base = I->second; 12777 12778 RewriteMap[LHSUnknown->getValue()] = 12779 getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType()))); 12780 } 12781 break; 12782 } 12783 case CmpInst::ICMP_ULE: { 12784 if (!containsAddRecurrence(RHS)) { 12785 const SCEV *Base = LHS; 12786 auto I = RewriteMap.find(LHSUnknown->getValue()); 12787 if (I != RewriteMap.end()) 12788 Base = I->second; 12789 RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS); 12790 } 12791 break; 12792 } 12793 case CmpInst::ICMP_EQ: 12794 if (isa<SCEVConstant>(RHS)) 12795 RewriteMap[LHSUnknown->getValue()] = RHS; 12796 break; 12797 case CmpInst::ICMP_NE: 12798 if (isa<SCEVConstant>(RHS) && 12799 cast<SCEVConstant>(RHS)->getValue()->isNullValue()) 12800 RewriteMap[LHSUnknown->getValue()] = 12801 getUMaxExpr(LHS, getOne(RHS->getType())); 12802 break; 12803 default: 12804 break; 12805 } 12806 }; 12807 // Starting at the loop predecessor, climb up the predecessor chain as long 12808 // as we can find predecessors that have unique successors leading to the 12809 // original header. 12810 // TODO: share this logic with isLoopEntryGuardedByCond. 12811 ValueToSCEVMapTy RewriteMap; 12812 for (std::pair<const BasicBlock *, const BasicBlock *> Pair( 12813 L->getLoopPredecessor(), L->getHeader()); 12814 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 12815 12816 const BranchInst *LoopEntryPredicate = 12817 dyn_cast<BranchInst>(Pair.first->getTerminator()); 12818 if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) 12819 continue; 12820 12821 // TODO: use information from more complex conditions, e.g. AND expressions. 12822 auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition()); 12823 if (!Cmp) 12824 continue; 12825 12826 auto Predicate = Cmp->getPredicate(); 12827 if (LoopEntryPredicate->getSuccessor(1) == Pair.second) 12828 Predicate = CmpInst::getInversePredicate(Predicate); 12829 CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)), 12830 getSCEV(Cmp->getOperand(1)), RewriteMap); 12831 } 12832 12833 // Also collect information from assumptions dominating the loop. 
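// For instance (sketch): a call to @llvm.assume on
//   %c = icmp ult i32 %n, 100
// that dominates the loop header contributes the same rewrite a branch
// guard would, mapping %n -> umin(%n, 99) via the ICMP_ULT case in
// CollectCondition above.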
12834 for (auto &AssumeVH : AC.assumptions()) { 12835 if (!AssumeVH) 12836 continue; 12837 auto *AssumeI = cast<CallInst>(AssumeVH); 12838 auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0)); 12839 if (!Cmp || !DT.dominates(AssumeI, L->getHeader())) 12840 continue; 12841 CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)), 12842 getSCEV(Cmp->getOperand(1)), RewriteMap); 12843 } 12844 12845 if (RewriteMap.empty()) 12846 return Expr; 12847 SCEVLoopGuardRewriter Rewriter(*this, RewriteMap); 12848 return Rewriter.visit(Expr); 12849 } 12850
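// A worked example of applyLoopGuards (a sketch over hypothetical IR):
//
//   entry:
//     %c = icmp ne i32 %n, 0
//     br i1 %c, label %loop, label %exit
//   loop:                          ; backedge-taken count is (-1 + %n)
//     ...
//
// The guard maps %n -> umax(%n, 1) (the ICMP_NE-against-zero case above),
// so the backedge-taken count rewrites to (-1 + umax(%n, 1)), which rules
// out the %n == 0 case that would make the unsigned subtraction wrap.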