//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
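//
// A small illustrative example of the representation (using the textual
// notation produced by SCEV::print below, not part of the original header
// comment): the canonical induction variable of a loop with header %loop,
// starting at 0 and incremented by 1 each iteration, is the add recurrence
// {0,+,1}<%loop>, and an in-loop value such as "i * 4 + 7" folds to the
// recurrence {7,+,4}<%loop>.
//
//===----------------------------------------------------------------------===//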

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps",
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

LLVM_DUMP_METHOD
void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
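
// A sketch of the textual forms the printer above produces (illustrative,
// not exhaustive): a constant prints as e.g. "4", an add recurrence over a
// loop with header %loop as "{0,+,4}<nuw><%loop>", a cast as
// "(zext i8 %x to i32)", and an unanalyzable value simply as its IR name.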

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

/// isNonConstantNegative - Return true if the specified SCEV is negated, but
/// not a constant.
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
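
// A brief note on the pattern matching below (an illustration; the canonical
// description is the code itself): the three helpers recognize the classic
// target-independent constant idioms built from getelementptr on a null
// pointer, e.g.
//   ptrtoint (getelementptr %T* null, i32 1)              ~ sizeof(%T)
//   ptrtoint (getelementptr {i1, %T}* null, i32 0, i32 1) ~ alignof(%T)
//   ptrtoint (getelementptr %S* null, i32 0, i32 <n>)     ~ offsetof(%S, n)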

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                          ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
/// SCEVComplexityCompare - Return true if the complexity of the LHS is less
/// than the complexity of the RHS.  This comparator is used to canonicalize
/// expressions.
class SCEVComplexityCompare {
  const LoopInfo *const LI;
public:
  explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

  // Returns true if LHS is less complex than RHS; false otherwise.
  bool operator()(const SCEV *LHS, const SCEV *RHS) const {
    return compare(LHS, RHS) < 0;
  }

  // Return negative, zero, or positive, if LHS is less than, equal to, or
  // greater than RHS, respectively. A three-way result allows recursive
  // comparisons to be more efficient.
  int compare(const SCEV *LHS, const SCEV *RHS) const {
    // Fast-path: SCEVs are uniqued so we can do a quick equality check.
    if (LHS == RHS)
      return 0;

    // Primarily, sort the SCEVs by their getSCEVType().
    unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
    if (LType != RType)
      return (int)LType - (int)RType;

    // Aside from the getSCEVType() ordering, the particular ordering
    // isn't very important except that it's beneficial to be consistent,
    // so that (a + b) and (b + a) don't end up as different expressions.
    switch (static_cast<SCEVTypes>(LType)) {
    case scUnknown: {
      const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
      const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      const Value *LV = LU->getValue(), *RV = RU->getValue();

      // Order pointer values after integer values. This helps SCEVExpander
      // form GEPs.
      bool LIsPointer = LV->getType()->isPointerTy(),
           RIsPointer = RV->getType()->isPointerTy();
      if (LIsPointer != RIsPointer)
        return (int)LIsPointer - (int)RIsPointer;

      // Compare getValueID values.
      unsigned LID = LV->getValueID(),
               RID = RV->getValueID();
      if (LID != RID)
        return (int)LID - (int)RID;

      // Sort arguments by their position.
      if (const Argument *LA = dyn_cast<Argument>(LV)) {
        const Argument *RA = cast<Argument>(RV);
        unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
        return (int)LArgNo - (int)RArgNo;
      }

      // For instructions, compare their loop depth, and their operand
      // count.  This is pretty loose.
      if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
        const Instruction *RInst = cast<Instruction>(RV);

        // Compare loop depths.
        const BasicBlock *LParent = LInst->getParent(),
                         *RParent = RInst->getParent();
        if (LParent != RParent) {
          unsigned LDepth = LI->getLoopDepth(LParent),
                   RDepth = LI->getLoopDepth(RParent);
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Compare the number of operands.
        unsigned LNumOps = LInst->getNumOperands(),
                 RNumOps = RInst->getNumOperands();
        return (int)LNumOps - (int)RNumOps;
      }

      return 0;
    }

    case scConstant: {
      const SCEVConstant *LC = cast<SCEVConstant>(LHS);
      const SCEVConstant *RC = cast<SCEVConstant>(RHS);

      // Compare constant values.
      const APInt &LA = LC->getAPInt();
      const APInt &RA = RC->getAPInt();
      unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
      if (LBitWidth != RBitWidth)
        return (int)LBitWidth - (int)RBitWidth;
      return LA.ult(RA) ? -1 : 1;
    }
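
    // For instance (an illustration, not part of the original source): two
    // i32 constants 3 and 7 compare by unsigned value, so 3 orders before 7,
    // and any constant orders before an add recurrence because the check
    // above sorts by getSCEVType() first.  This is what lets later folders
    // assume a constant operand, if present, sits at the front of a sorted
    // operand list.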

    case scAddRecExpr: {
      const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
      const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

      // Compare addrec loop depths.
      const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
      if (LLoop != RLoop) {
        unsigned LDepth = LLoop->getLoopDepth(),
                 RDepth = RLoop->getLoopDepth();
        if (LDepth != RDepth)
          return (int)LDepth - (int)RDepth;
      }

      // Addrec complexity grows with operand count.
      unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
      if (LNumOps != RNumOps)
        return (int)LNumOps - (int)RNumOps;

      // Lexicographically compare.
      for (unsigned i = 0; i != LNumOps; ++i) {
        long X = compare(LA->getOperand(i), RA->getOperand(i));
        if (X != 0)
          return X;
      }

      return 0;
    }

    case scAddExpr:
    case scMulExpr:
    case scSMaxExpr:
    case scUMaxExpr: {
      const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
      const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

      // Lexicographically compare n-ary expressions.  The operand counts are
      // equal past this check, so comparing operand-by-operand is sufficient.
      unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
      if (LNumOps != RNumOps)
        return (int)LNumOps - (int)RNumOps;

      for (unsigned i = 0; i != LNumOps; ++i) {
        long X = compare(LC->getOperand(i), RC->getOperand(i));
        if (X != 0)
          return X;
      }
      return 0;
    }

    case scUDivExpr: {
      const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
      const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

      // Lexicographically compare udiv expressions.
      long X = compare(LC->getLHS(), RC->getLHS());
      if (X != 0)
        return X;
      return compare(LC->getRHS(), RC->getRHS());
    }

    case scTruncate:
    case scZeroExtend:
    case scSignExtend: {
      const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
      const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

      // Compare cast expressions by operand.
      return compare(LC->getOperand(), RC->getOperand());
    }

    case scCouldNotCompute:
      llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    }
    llvm_unreachable("Unknown SCEV kind!");
  }
};
} // end anonymous namespace

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
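
// For example (illustrative only): given the operand list (%a, 7, %b, %a)
// for an add, the stable sort above first yields (7, %a, %b, %a), because
// constants have the lowest getSCEVType(), and the grouping pass then moves
// the duplicate %a next to the first one, producing (7, %a, %a, %b).  Later
// folders rely on duplicates being adjacent to combine them (e.g. into
// 2 * %a).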

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size;
    FindSCEVSize() : Size(0) {}

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }
    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N / 1 has quotient N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<const SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }
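
  // As a usage sketch (illustrative, not part of the original source):
  // dividing (4 * %x * %y) by (2 * %x) splits the denominator into its
  // factors, divides by the constant 2 first (constants sort first in a
  // product), leaving (2 * %x * %y), then cancels %x via the mul-operand
  // scan in visitMulExpr below, yielding quotient (2 * %y) and remainder
  // zero.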

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the
    // rest of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient
  // to be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
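  //
  // As a worked example (illustrative; the numbers follow from the
  // definitions above): for K = 3, K! = 6 = 2^1 * 3, so T = 1 and
  // K! / 2^T = 3, which is odd.  The product It*(It-1)*(It-2) is computed
  // at W+1 bits, shifted right by one to divide by 2^T, truncated to W
  // bits, and finally multiplied by the multiplicative inverse of 3 modulo
  // 2^W to divide by the remaining odd factor.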

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
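
// For instance (a small worked example, not from the original source): the
// quadratic recurrence {0,+,1,+,2} evaluated at iteration It is
//   0*BC(It,0) + 1*BC(It,1) + 2*BC(It,2) = It + 2*(It*(It-1)/2) = It^2,
// matching its values 0, 1, 4, 9, ... at iterations 0, 1, 2, 3.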

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SA->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates, or we replace other casts with truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      if (!isa<SCEVCastExpr>(SM->getOperand(i)))
        hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
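
// To illustrate the folds above (an example, not original source text):
// truncating (zext i8 %x to i64) to i16 folds to (zext i8 %x to i16), since
// the zero-extended high bits being discarded are known zero, and truncating
// the add recurrence {0,+,4}<%loop> from i64 to i32 simply truncates each
// operand, giving {0,+,4}<%loop> at i32.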

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMax());
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRange(Step).getSignedMin());
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRange(Step).getUnsignedMax());
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *,
                                                          Type *);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it.  Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling.  This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once"
  // implies "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy),
                     (SE->*GetExtendExpr)(Step, WideTy));
  if ((SE->*GetExtendExpr)(Start, WideTy) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
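
// A concrete instance of the normalization above (illustrative): for the
// recurrence {1 + %x,+,1}, Step = 1 occurs in the start expression, so
// PreStart = %x.  If {%x,+,1} can be shown <nuw> by one of the three checks,
// then zext({1 + %x,+,1}) can be written as {1 + zext(%x),+,1} in the wider
// type.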

// Get the normalized zero or sign extended expression for this AddRec's
// Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty),
                        (SE->*GetExtendExpr)(PreStart, Ty));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
// If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
// If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
// If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and
// T is `Delta` (defined below).
//
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  //
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
            getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion.  In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
            getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't
            // self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this),
                getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe.  Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
1539 return getAddRecExpr( 1540 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1541 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1542 } 1543 } else if (isKnownNegative(Step)) { 1544 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1545 getSignedRange(Step).getSignedMin()); 1546 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1547 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1548 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1549 AR->getPostIncExpr(*this), N))) { 1550 // Cache knowledge of AR NW, which is propagated to this AddRec. 1551 // Negative step causes unsigned wrap, but it still can't self-wrap. 1552 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1553 // Return the expression with the addrec on the outside. 1554 return getAddRecExpr( 1555 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1556 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1557 } 1558 } 1559 } 1560 1561 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1562 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1563 return getAddRecExpr( 1564 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this), 1565 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1566 } 1567 } 1568 1569 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1570 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1571 if (SA->hasNoUnsignedWrap()) { 1572 // If the addition does not unsign overflow then we can, by definition, 1573 // commute the zero extension with the addition operation. 1574 SmallVector<const SCEV *, 4> Ops; 1575 for (const auto *Op : SA->operands()) 1576 Ops.push_back(getZeroExtendExpr(Op, Ty)); 1577 return getAddExpr(Ops, SCEV::FlagNUW); 1578 } 1579 } 1580 1581 // The cast wasn't folded; create an explicit cast node. 1582 // Recompute the insert position, as it may have been invalidated. 1583 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1584 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1585 Op, Ty); 1586 UniqueSCEVs.InsertNode(S, IP); 1587 return S; 1588 } 1589 1590 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, 1591 Type *Ty) { 1592 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1593 "This is not an extending conversion!"); 1594 assert(isSCEVable(Ty) && 1595 "This is not a conversion to a SCEVable type!"); 1596 Ty = getEffectiveSCEVType(Ty); 1597 1598 // Fold if the operand is constant. 1599 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1600 return getConstant( 1601 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1602 1603 // sext(sext(x)) --> sext(x) 1604 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1605 return getSignExtendExpr(SS->getOperand(), Ty); 1606 1607 // sext(zext(x)) --> zext(x) 1608 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1609 return getZeroExtendExpr(SZ->getOperand(), Ty); 1610 1611 // Before doing any expensive analysis, check to see if we've already 1612 // computed a SCEV for this Op and Ty. 1613 FoldingSetNodeID ID; 1614 ID.AddInteger(scSignExtend); 1615 ID.AddPointer(Op); 1616 ID.AddPointer(Ty); 1617 void *IP = nullptr; 1618 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1619 1620 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1621 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1622 // It's possible the bits taken off by the truncate were all sign bits. 
If
1623 // so, we should be able to simplify this further.
1624 const SCEV *X = ST->getOperand();
1625 ConstantRange CR = getSignedRange(X);
1626 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1627 unsigned NewBits = getTypeSizeInBits(Ty);
1628 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1629 CR.sextOrTrunc(NewBits)))
1630 return getTruncateOrSignExtend(X, Ty);
1631 }
1632
1633 // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
1634 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1635 if (SA->getNumOperands() == 2) {
1636 auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
1637 auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
1638 if (SMul && SC1) {
1639 if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
1640 const APInt &C1 = SC1->getAPInt();
1641 const APInt &C2 = SC2->getAPInt();
1642 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
1643 C2.ugt(C1) && C2.isPowerOf2())
1644 return getAddExpr(getSignExtendExpr(SC1, Ty),
1645 getSignExtendExpr(SMul, Ty));
1646 }
1647 }
1648 }
1649
1650 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1651 if (SA->hasNoSignedWrap()) {
1652 // If the addition does not sign overflow then we can, by definition,
1653 // commute the sign extension with the addition operation.
1654 SmallVector<const SCEV *, 4> Ops;
1655 for (const auto *Op : SA->operands())
1656 Ops.push_back(getSignExtendExpr(Op, Ty));
1657 return getAddExpr(Ops, SCEV::FlagNSW);
1658 }
1659 }
1660 // If the input value is a chrec scev, and we can prove that the value
1661 // did not overflow the old, smaller, value, we can sign extend all of the
1662 // operands (often constants). This allows analysis of something like
1663 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1664 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1665 if (AR->isAffine()) {
1666 const SCEV *Start = AR->getStart();
1667 const SCEV *Step = AR->getStepRecurrence(*this);
1668 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1669 const Loop *L = AR->getLoop();
1670
1671 if (!AR->hasNoSignedWrap()) {
1672 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1673 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
1674 }
1675
1676 // If we have special knowledge that this addrec won't overflow,
1677 // we don't need to do any further analysis.
1678 if (AR->hasNoSignedWrap())
1679 return getAddRecExpr(
1680 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1681 getSignExtendExpr(Step, Ty), L, SCEV::FlagNSW);
1682
1683 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1684 // Note that this serves two purposes: It filters out loops that are
1685 // simply not analyzable, and it covers the case where this code is
1686 // being called from within backedge-taken count analysis, such that
1687 // attempting to ask for the backedge-taken count would likely result
1688 // in infinite recursion. In the latter case, the analysis code will
1689 // cope with a conservative value, and it will take care to purge
1690 // that value once it has finished.
1691 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1692 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1693 // Manually compute the final value for AR, checking for
1694 // overflow.
1695
1696 // Check whether the backedge-taken count can be losslessly cast to
1697 // the addrec's type. The count is always unsigned.
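// For example, a max backedge-taken count of 7 as an i32 survives the
// round trip through an i8 addrec type, while a count of 300 would not;
// in the latter case the manual overflow check below is skipped.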
1698 const SCEV *CastedMaxBECount = 1699 getTruncateOrZeroExtend(MaxBECount, Start->getType()); 1700 const SCEV *RecastedMaxBECount = 1701 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType()); 1702 if (MaxBECount == RecastedMaxBECount) { 1703 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1704 // Check whether Start+Step*MaxBECount has no signed overflow. 1705 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); 1706 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy); 1707 const SCEV *WideStart = getSignExtendExpr(Start, WideTy); 1708 const SCEV *WideMaxBECount = 1709 getZeroExtendExpr(CastedMaxBECount, WideTy); 1710 const SCEV *OperandExtendedAdd = 1711 getAddExpr(WideStart, 1712 getMulExpr(WideMaxBECount, 1713 getSignExtendExpr(Step, WideTy))); 1714 if (SAdd == OperandExtendedAdd) { 1715 // Cache knowledge of AR NSW, which is propagated to this AddRec. 1716 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 1717 // Return the expression with the addrec on the outside. 1718 return getAddRecExpr( 1719 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), 1720 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1721 } 1722 // Similar to above, only this time treat the step value as unsigned. 1723 // This covers loops that count up with an unsigned step. 1724 OperandExtendedAdd = 1725 getAddExpr(WideStart, 1726 getMulExpr(WideMaxBECount, 1727 getZeroExtendExpr(Step, WideTy))); 1728 if (SAdd == OperandExtendedAdd) { 1729 // If AR wraps around then 1730 // 1731 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 1732 // => SAdd != OperandExtendedAdd 1733 // 1734 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 1735 // (SAdd == OperandExtendedAdd => AR is NW) 1736 1737 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1738 1739 // Return the expression with the addrec on the outside. 1740 return getAddRecExpr( 1741 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this), 1742 getZeroExtendExpr(Step, Ty), L, AR->getNoWrapFlags()); 1743 } 1744 } 1745 } 1746 1747 // Normally, in the cases we can prove no-overflow via a 1748 // backedge guarding condition, we can also compute a backedge 1749 // taken count for the loop. The exceptions are assumptions and 1750 // guards present in the loop -- SCEV is not great at exploiting 1751 // these to compute max backedge taken counts, but can still use 1752 // these to prove lack of overflow. Use this fact to avoid 1753 // doing extra work that may not pay off. 1754 1755 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1756 !AC.assumptions().empty()) { 1757 // If the backedge is guarded by a comparison with the pre-inc 1758 // value the addrec is safe. Also, if the entry is guarded by 1759 // a comparison with the start value and the backedge is 1760 // guarded by a comparison with the post-inc value, the addrec 1761 // is safe. 1762 ICmpInst::Predicate Pred; 1763 const SCEV *OverflowLimit = 1764 getSignedOverflowLimitForStep(Step, &Pred, this); 1765 if (OverflowLimit && 1766 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 1767 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) && 1768 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this), 1769 OverflowLimit)))) { 1770 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
1771 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1772 return getAddRecExpr(
1773 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1774 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1775 }
1776 }
1777
1778 // If Start and Step are constants, check if we can apply this
1779 // transformation:
1780 // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
1781 auto *SC1 = dyn_cast<SCEVConstant>(Start);
1782 auto *SC2 = dyn_cast<SCEVConstant>(Step);
1783 if (SC1 && SC2) {
1784 const APInt &C1 = SC1->getAPInt();
1785 const APInt &C2 = SC2->getAPInt();
1786 if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
1787 C2.isPowerOf2()) {
1788 Start = getSignExtendExpr(Start, Ty);
1789 const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
1790 AR->getNoWrapFlags());
1791 return getAddExpr(Start, getSignExtendExpr(NewAR, Ty));
1792 }
1793 }
1794
1795 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
1796 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1797 return getAddRecExpr(
1798 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this),
1799 getSignExtendExpr(Step, Ty), L, AR->getNoWrapFlags());
1800 }
1801 }
1802
1803 // If the input value is provably positive and we could not simplify
1804 // away the sext, build a zext instead.
1805 if (isKnownNonNegative(Op))
1806 return getZeroExtendExpr(Op, Ty);
1807
1808 // The cast wasn't folded; create an explicit cast node.
1809 // Recompute the insert position, as it may have been invalidated.
1810 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1811 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1812 Op, Ty);
1813 UniqueSCEVs.InsertNode(S, IP);
1814 return S;
1815 }
1816
1817 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1818 /// unspecified bits out to the given type.
1819 ///
1820 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1821 Type *Ty) {
1822 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1823 "This is not an extending conversion!");
1824 assert(isSCEVable(Ty) &&
1825 "This is not a conversion to a SCEVable type!");
1826 Ty = getEffectiveSCEVType(Ty);
1827
1828 // Sign-extend negative constants.
1829 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1830 if (SC->getAPInt().isNegative())
1831 return getSignExtendExpr(Op, Ty);
1832
1833 // Peel off a truncate cast.
1834 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1835 const SCEV *NewOp = T->getOperand();
1836 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1837 return getAnyExtendExpr(NewOp, Ty);
1838 return getTruncateOrNoop(NewOp, Ty);
1839 }
1840
1841 // Next try a zext cast. If the cast is folded, use it.
1842 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1843 if (!isa<SCEVZeroExtendExpr>(ZExt))
1844 return ZExt;
1845
1846 // Next try a sext cast. If the cast is folded, use it.
1847 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1848 if (!isa<SCEVSignExtendExpr>(SExt))
1849 return SExt;
1850
1851 // Force the cast to be folded into the operands of an addrec.
1852 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1853 SmallVector<const SCEV *, 4> Ops;
1854 for (const SCEV *Op : AR->operands())
1855 Ops.push_back(getAnyExtendExpr(Op, Ty));
1856 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1857 }
1858
1859 // If the expression is obviously signed, use the sext cast value.
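// (An smax is an inherently signed computation, so e.g.
// anyext(smax(X, -1)) is best modeled as a sign extension.)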
1860 if (isa<SCEVSMaxExpr>(Op))
1861 return SExt;
1862
1863 // Absent any other information, use the zext cast value.
1864 return ZExt;
1865 }
1866
1867 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1868 /// a list of operands to be added under the given scale, and update the
1869 /// given map. This is a helper function for getAddExpr. As an example of
1870 /// what it does, given a sequence of operands that would form an add
1871 /// expression like this:
1872 ///
1873 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
1874 ///
1875 /// where A and B are constants, update the map with these values:
1876 ///
1877 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1878 ///
1879 /// and add 13 + A*B*29 to AccumulatedConstant.
1880 /// This will allow getAddExpr to produce this:
1881 ///
1882 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1883 ///
1884 /// This form often exposes folding opportunities that are hidden in
1885 /// the original operand list.
1886 ///
1887 /// Return true iff it appears that any interesting folding opportunities
1888 /// may be exposed. This helps getAddExpr short-circuit extra work in
1889 /// the common case where no interesting opportunities are present, and
1890 /// is also used as a check to avoid infinite recursion.
1891 ///
1892 static bool
1893 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1894 SmallVectorImpl<const SCEV *> &NewOps,
1895 APInt &AccumulatedConstant,
1896 const SCEV *const *Ops, size_t NumOperands,
1897 const APInt &Scale,
1898 ScalarEvolution &SE) {
1899 bool Interesting = false;
1900
1901 // Iterate over the add operands. They are sorted, with constants first.
1902 unsigned i = 0;
1903 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1904 ++i;
1905 // Pull a buried constant out to the outside.
1906 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1907 Interesting = true;
1908 AccumulatedConstant += Scale * C->getAPInt();
1909 }
1910
1911 // Next comes everything else. We're especially interested in multiplies
1912 // here, but they're in the middle, so just visit the rest with one loop.
1913 for (; i != NumOperands; ++i) {
1914 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1915 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1916 APInt NewScale =
1917 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
1918 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1919 // A multiplication of a constant with another add; recurse.
1920 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1921 Interesting |=
1922 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1923 Add->op_begin(), Add->getNumOperands(),
1924 NewScale, SE);
1925 } else {
1926 // A multiplication of a constant with some other value. Update
1927 // the map.
1928 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1929 const SCEV *Key = SE.getMulExpr(MulOps);
1930 auto Pair = M.insert({Key, NewScale});
1931 if (Pair.second) {
1932 NewOps.push_back(Pair.first->first);
1933 } else {
1934 Pair.first->second += NewScale;
1935 // The map already had an entry for this value, which may indicate
1936 // a folding opportunity.
1937 Interesting = true;
1938 }
1939 }
1940 } else {
1941 // An ordinary operand. Update the map.
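// For instance, a plain operand x first seen under scale A inserts
// (x, A); seeing x again under scale B bumps the entry to (x, A+B) and
// marks the result as an interesting folding opportunity.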
1942 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1943 M.insert({Ops[i], Scale});
1944 if (Pair.second) {
1945 NewOps.push_back(Pair.first->first);
1946 } else {
1947 Pair.first->second += Scale;
1948 // The map already had an entry for this value, which may indicate
1949 // a folding opportunity.
1950 Interesting = true;
1951 }
1952 }
1953 }
1954
1955 return Interesting;
1956 }
1957
1958 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
1959 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
1960 // can't-overflow flags for the operation if possible.
1961 static SCEV::NoWrapFlags
1962 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
1963 const SmallVectorImpl<const SCEV *> &Ops,
1964 SCEV::NoWrapFlags Flags) {
1965 using namespace std::placeholders;
1966 typedef OverflowingBinaryOperator OBO;
1967
1968 bool CanAnalyze =
1969 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
1970 (void)CanAnalyze;
1971 assert(CanAnalyze && "don't call from other places!");
1972
1973 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1974 SCEV::NoWrapFlags SignOrUnsignWrap =
1975 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
1976
1977 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1978 auto IsKnownNonNegative = [&](const SCEV *S) {
1979 return SE->isKnownNonNegative(S);
1980 };
1981
1982 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
1983 Flags =
1984 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1985
1986 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
1987
1988 if (SignOrUnsignWrap != SignOrUnsignMask && Type == scAddExpr &&
1989 Ops.size() == 2 && isa<SCEVConstant>(Ops[0])) {
1990
1991 // (A + C) --> (A + C)<nsw> if the addition does not sign overflow
1992 // (A + C) --> (A + C)<nuw> if the addition does not unsign overflow
1993
1994 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
1995 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
1996 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1997 Instruction::Add, C, OBO::NoSignedWrap);
1998 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
1999 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2000 }
2001 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2002 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2003 Instruction::Add, C, OBO::NoUnsignedWrap);
2004 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2005 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2006 }
2007 }
2008
2009 return Flags;
2010 }
2011
2012 /// getAddExpr - Get a canonical add expression, or something simpler if
2013 /// possible.
2014 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2015 SCEV::NoWrapFlags Flags) {
2016 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2017 "only nuw or nsw allowed");
2018 assert(!Ops.empty() && "Cannot get empty add!");
2019 if (Ops.size() == 1) return Ops[0];
2020 #ifndef NDEBUG
2021 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2022 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2023 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2024 "SCEVAddExpr operand types don't match!");
2025 #endif
2026
2027 // Sort by complexity, this groups all similar expression types together.
2028 GroupByComplexity(Ops, &LI);
2029
2030 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
2031
2032 // If there are any constants, fold them together.
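// For example, after sorting, (x + 1 + 2) is held as (1 + 2 + x) and the
// constants fold to give (3 + x); a leftover constant zero is stripped.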
2033 unsigned Idx = 0;
2034 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2035 ++Idx;
2036 assert(Idx < Ops.size());
2037 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2038 // We found two constants, fold them together!
2039 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2040 if (Ops.size() == 2) return Ops[0];
2041 Ops.erase(Ops.begin()+1); // Erase the folded element
2042 LHSC = cast<SCEVConstant>(Ops[0]);
2043 }
2044
2045 // If we are left with a constant zero being added, strip it off.
2046 if (LHSC->getValue()->isZero()) {
2047 Ops.erase(Ops.begin());
2048 --Idx;
2049 }
2050
2051 if (Ops.size() == 1) return Ops[0];
2052 }
2053
2054 // Okay, check to see if the same value occurs in the operand list more than
2055 // once. If so, merge them together into a multiply expression. Since we
2056 // sorted the list, these values are required to be adjacent.
2057 Type *Ty = Ops[0]->getType();
2058 bool FoundMatch = false;
2059 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2060 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2061 // Scan ahead to count how many equal operands there are.
2062 unsigned Count = 2;
2063 while (i+Count != e && Ops[i+Count] == Ops[i])
2064 ++Count;
2065 // Merge the values into a multiply.
2066 const SCEV *Scale = getConstant(Ty, Count);
2067 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
2068 if (Ops.size() == Count)
2069 return Mul;
2070 Ops[i] = Mul;
2071 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2072 --i; e -= Count - 1;
2073 FoundMatch = true;
2074 }
2075 if (FoundMatch)
2076 return getAddExpr(Ops, Flags);
2077
2078 // Check for truncates. If all the operands are truncated from the same
2079 // type, see if factoring out the truncate would permit the result to be
2080 // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
2081 // if the contents of the resulting outer trunc fold to something simple.
2082 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
2083 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
2084 Type *DstType = Trunc->getType();
2085 Type *SrcType = Trunc->getOperand()->getType();
2086 SmallVector<const SCEV *, 8> LargeOps;
2087 bool Ok = true;
2088 // Check all the operands to see if they can be represented in the
2089 // source type of the truncate.
2090 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2091 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2092 if (T->getOperand()->getType() != SrcType) {
2093 Ok = false;
2094 break;
2095 }
2096 LargeOps.push_back(T->getOperand());
2097 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2098 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2099 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2100 SmallVector<const SCEV *, 8> LargeMulOps;
2101 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2102 if (const SCEVTruncateExpr *T =
2103 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2104 if (T->getOperand()->getType() != SrcType) {
2105 Ok = false;
2106 break;
2107 }
2108 LargeMulOps.push_back(T->getOperand());
2109 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2110 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2111 } else {
2112 Ok = false;
2113 break;
2114 }
2115 }
2116 if (Ok)
2117 LargeOps.push_back(getMulExpr(LargeMulOps));
2118 } else {
2119 Ok = false;
2120 break;
2121 }
2122 }
2123 if (Ok) {
2124 // Evaluate the expression in the larger type.
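// For example, trunc(x) + (-1 * trunc(x)) widens to x + (-1 * x), which
// folds to the constant 0 and is then truncated back down.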
2125 const SCEV *Fold = getAddExpr(LargeOps, Flags);
2126 // If it folds to something simple, use it. Otherwise, don't.
2127 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2128 return getTruncateExpr(Fold, DstType);
2129 }
2130 }
2131
2132 // Skip past any other cast SCEVs.
2133 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2134 ++Idx;
2135
2136 // If there are add operands, they would be next.
2137 if (Idx < Ops.size()) {
2138 bool DeletedAdd = false;
2139 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2140 // If we have an add, expand the add operands onto the end of the operands
2141 // list.
2142 Ops.erase(Ops.begin()+Idx);
2143 Ops.append(Add->op_begin(), Add->op_end());
2144 DeletedAdd = true;
2145 }
2146
2147 // If we deleted at least one add, we added operands to the end of the list,
2148 // and they are not necessarily sorted. Recurse to resort and resimplify
2149 // any operands we just acquired.
2150 if (DeletedAdd)
2151 return getAddExpr(Ops);
2152 }
2153
2154 // Skip over the add expression until we get to a multiply.
2155 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2156 ++Idx;
2157
2158 // Check to see if there are any folding opportunities present with
2159 // operands multiplied by constant values.
2160 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2161 uint64_t BitWidth = getTypeSizeInBits(Ty);
2162 DenseMap<const SCEV *, APInt> M;
2163 SmallVector<const SCEV *, 8> NewOps;
2164 APInt AccumulatedConstant(BitWidth, 0);
2165 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2166 Ops.data(), Ops.size(),
2167 APInt(BitWidth, 1), *this)) {
2168 struct APIntCompare {
2169 bool operator()(const APInt &LHS, const APInt &RHS) const {
2170 return LHS.ult(RHS);
2171 }
2172 };
2173
2174 // Some interesting folding opportunity is present, so it's worthwhile to
2175 // re-generate the operands list. Group the operands by constant scale,
2176 // to avoid multiplying by the same constant scale multiple times.
2177 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2178 for (const SCEV *NewOp : NewOps)
2179 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2180 // Re-generate the operands list.
2181 Ops.clear();
2182 if (AccumulatedConstant != 0)
2183 Ops.push_back(getConstant(AccumulatedConstant));
2184 for (auto &MulOp : MulOpLists)
2185 if (MulOp.first != 0)
2186 Ops.push_back(getMulExpr(getConstant(MulOp.first),
2187 getAddExpr(MulOp.second)));
2188 if (Ops.empty())
2189 return getZero(Ty);
2190 if (Ops.size() == 1)
2191 return Ops[0];
2192 return getAddExpr(Ops);
2193 }
2194 }
2195
2196 // If we are adding something to a multiply expression, make sure the
2197 // something is not already an operand of the multiply. If so, merge it into
2198 // the multiply.
2199 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2200 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2201 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2202 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2203 if (isa<SCEVConstant>(MulOpSCEV))
2204 continue;
2205 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2206 if (MulOpSCEV == Ops[AddOp]) {
2207 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2208 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2209 if (Mul->getNumOperands() != 2) {
2210 // If the multiply has more than two operands, we must get the
2211 // Y*Z term.
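// e.g., for X + (X * Y * Z), the operand list (X, Y, Z) minus the
// matched X is rebuilt into the inner multiply Y*Z.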
2212 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2213 Mul->op_begin()+MulOp); 2214 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2215 InnerMul = getMulExpr(MulOps); 2216 } 2217 const SCEV *One = getOne(Ty); 2218 const SCEV *AddOne = getAddExpr(One, InnerMul); 2219 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); 2220 if (Ops.size() == 2) return OuterMul; 2221 if (AddOp < Idx) { 2222 Ops.erase(Ops.begin()+AddOp); 2223 Ops.erase(Ops.begin()+Idx-1); 2224 } else { 2225 Ops.erase(Ops.begin()+Idx); 2226 Ops.erase(Ops.begin()+AddOp-1); 2227 } 2228 Ops.push_back(OuterMul); 2229 return getAddExpr(Ops); 2230 } 2231 2232 // Check this multiply against other multiplies being added together. 2233 for (unsigned OtherMulIdx = Idx+1; 2234 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2235 ++OtherMulIdx) { 2236 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2237 // If MulOp occurs in OtherMul, we can fold the two multiplies 2238 // together. 2239 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2240 OMulOp != e; ++OMulOp) 2241 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2242 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2243 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2244 if (Mul->getNumOperands() != 2) { 2245 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2246 Mul->op_begin()+MulOp); 2247 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2248 InnerMul1 = getMulExpr(MulOps); 2249 } 2250 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2251 if (OtherMul->getNumOperands() != 2) { 2252 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2253 OtherMul->op_begin()+OMulOp); 2254 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2255 InnerMul2 = getMulExpr(MulOps); 2256 } 2257 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 2258 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 2259 if (Ops.size() == 2) return OuterMul; 2260 Ops.erase(Ops.begin()+Idx); 2261 Ops.erase(Ops.begin()+OtherMulIdx-1); 2262 Ops.push_back(OuterMul); 2263 return getAddExpr(Ops); 2264 } 2265 } 2266 } 2267 } 2268 2269 // If there are any add recurrences in the operands list, see if any other 2270 // added values are loop invariant. If so, we can fold them into the 2271 // recurrence. 2272 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2273 ++Idx; 2274 2275 // Scan over all recurrences, trying to fold loop invariants into them. 2276 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2277 // Scan all of the other operands to this add and add them to the vector if 2278 // they are loop invariant w.r.t. the recurrence. 2279 SmallVector<const SCEV *, 8> LIOps; 2280 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2281 const Loop *AddRecLoop = AddRec->getLoop(); 2282 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2283 if (isLoopInvariant(Ops[i], AddRecLoop)) { 2284 LIOps.push_back(Ops[i]); 2285 Ops.erase(Ops.begin()+i); 2286 --i; --e; 2287 } 2288 2289 // If we found some loop invariants, fold them into the recurrence. 2290 if (!LIOps.empty()) { 2291 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2292 LIOps.push_back(AddRec->getStart()); 2293 2294 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2295 AddRec->op_end()); 2296 AddRecOps[0] = getAddExpr(LIOps); 2297 2298 // Build the new addrec. Propagate the NUW and NSW flags if both the 2299 // outer add and the inner addrec are guaranteed to have no overflow. 
2300 // Always propagate NW.
2301 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2302 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2303
2304 // If all of the other operands were loop invariant, we are done.
2305 if (Ops.size() == 1) return NewRec;
2306
2307 // Otherwise, add the folded AddRec to the non-invariant parts.
2308 for (unsigned i = 0;; ++i)
2309 if (Ops[i] == AddRec) {
2310 Ops[i] = NewRec;
2311 break;
2312 }
2313 return getAddExpr(Ops);
2314 }
2315
2316 // Okay, if there weren't any loop invariants to be folded, check to see if
2317 // there are multiple AddRec's with the same loop induction variable being
2318 // added together. If so, we can fold them.
2319 for (unsigned OtherIdx = Idx+1;
2320 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2321 ++OtherIdx)
2322 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2323 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2324 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
2325 AddRec->op_end());
2326 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2327 ++OtherIdx)
2328 if (const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2329 if (OtherAddRec->getLoop() == AddRecLoop) {
2330 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2331 i != e; ++i) {
2332 if (i >= AddRecOps.size()) {
2333 AddRecOps.append(OtherAddRec->op_begin()+i,
2334 OtherAddRec->op_end());
2335 break;
2336 }
2337 AddRecOps[i] = getAddExpr(AddRecOps[i],
2338 OtherAddRec->getOperand(i));
2339 }
2340 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2341 }
2342 // Step size has changed, so we cannot guarantee no self-wraparound.
2343 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2344 return getAddExpr(Ops);
2345 }
2346
2347 // Otherwise couldn't fold anything into this recurrence. Move onto the
2348 // next one.
2349 }
2350
2351 // Okay, it looks like we really DO need an add expr. Check to see if we
2352 // already have one, otherwise create a new one.
2353 FoldingSetNodeID ID;
2354 ID.AddInteger(scAddExpr);
2355 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2356 ID.AddPointer(Ops[i]);
2357 void *IP = nullptr;
2358 SCEVAddExpr *S =
2359 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2360 if (!S) {
2361 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2362 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2363 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
2364 O, Ops.size());
2365 UniqueSCEVs.InsertNode(S, IP);
2366 }
2367 S->setNoWrapFlags(Flags);
2368 return S;
2369 }
2370
2371 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2372 uint64_t k = i*j;
2373 if (j > 1 && k / j != i) Overflow = true;
2374 return k;
2375 }
2376
2377 /// Compute the result of "n choose k", the binomial coefficient. If an
2378 /// intermediate computation overflows, Overflow will be set and the return will
2379 /// be garbage. Overflow is not cleared on absence of overflow.
2380 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2381 // We use the multiplicative formula:
2382 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2383 // At each iteration, we multiply by the next term of the numerator and
2384 // divide by the next denominator term, taken in increasing order. This
2385 // division will always produce an integral result, and helps reduce the
2386 // chance of overflow in the intermediate computations.
However, we can still overflow even when the 2387 // final result would fit. 2388 2389 if (n == 0 || n == k) return 1; 2390 if (k > n) return 0; 2391 2392 if (k > n/2) 2393 k = n-k; 2394 2395 uint64_t r = 1; 2396 for (uint64_t i = 1; i <= k; ++i) { 2397 r = umul_ov(r, n-(i-1), Overflow); 2398 r /= i; 2399 } 2400 return r; 2401 } 2402 2403 /// Determine if any of the operands in this SCEV are a constant or if 2404 /// any of the add or multiply expressions in this SCEV contain a constant. 2405 static bool containsConstantSomewhere(const SCEV *StartExpr) { 2406 SmallVector<const SCEV *, 4> Ops; 2407 Ops.push_back(StartExpr); 2408 while (!Ops.empty()) { 2409 const SCEV *CurrentExpr = Ops.pop_back_val(); 2410 if (isa<SCEVConstant>(*CurrentExpr)) 2411 return true; 2412 2413 if (isa<SCEVAddExpr>(*CurrentExpr) || isa<SCEVMulExpr>(*CurrentExpr)) { 2414 const auto *CurrentNAry = cast<SCEVNAryExpr>(CurrentExpr); 2415 Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); 2416 } 2417 } 2418 return false; 2419 } 2420 2421 /// getMulExpr - Get a canonical multiply expression, or something simpler if 2422 /// possible. 2423 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2424 SCEV::NoWrapFlags Flags) { 2425 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2426 "only nuw or nsw allowed"); 2427 assert(!Ops.empty() && "Cannot get empty mul!"); 2428 if (Ops.size() == 1) return Ops[0]; 2429 #ifndef NDEBUG 2430 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2431 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2432 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2433 "SCEVMulExpr operand types don't match!"); 2434 #endif 2435 2436 // Sort by complexity, this groups all similar expression types together. 2437 GroupByComplexity(Ops, &LI); 2438 2439 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2440 2441 // If there are any constants, fold them together. 2442 unsigned Idx = 0; 2443 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2444 2445 // C1*(C2+V) -> C1*C2 + C1*V 2446 if (Ops.size() == 2) 2447 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2448 // If any of Add's ops are Adds or Muls with a constant, 2449 // apply this transformation as well. 2450 if (Add->getNumOperands() == 2) 2451 if (containsConstantSomewhere(Add)) 2452 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 2453 getMulExpr(LHSC, Add->getOperand(1))); 2454 2455 ++Idx; 2456 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2457 // We found two constants, fold them together! 2458 ConstantInt *Fold = 2459 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2460 Ops[0] = getConstant(Fold); 2461 Ops.erase(Ops.begin()+1); // Erase the folded element 2462 if (Ops.size() == 1) return Ops[0]; 2463 LHSC = cast<SCEVConstant>(Ops[0]); 2464 } 2465 2466 // If we are left with a constant one being multiplied, strip it off. 2467 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 2468 Ops.erase(Ops.begin()); 2469 --Idx; 2470 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2471 // If we have a multiply of zero, it will always be zero. 2472 return Ops[0]; 2473 } else if (Ops[0]->isAllOnesValue()) { 2474 // If we have a mul by -1 of an add, try distributing the -1 among the 2475 // add operands. 
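// For example, -1 * (x + 5) becomes (-5 + -1*x); since at least one term
// folded (the constant), the distributed form is preferred.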
2476 if (Ops.size() == 2) {
2477 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
2478 SmallVector<const SCEV *, 4> NewOps;
2479 bool AnyFolded = false;
2480 for (const SCEV *AddOp : Add->operands()) {
2481 const SCEV *Mul = getMulExpr(Ops[0], AddOp);
2482 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
2483 NewOps.push_back(Mul);
2484 }
2485 if (AnyFolded)
2486 return getAddExpr(NewOps);
2487 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
2488 // Negation preserves a recurrence's no self-wrap property.
2489 SmallVector<const SCEV *, 4> Operands;
2490 for (const SCEV *AddRecOp : AddRec->operands())
2491 Operands.push_back(getMulExpr(Ops[0], AddRecOp));
2492
2493 return getAddRecExpr(Operands, AddRec->getLoop(),
2494 AddRec->getNoWrapFlags(SCEV::FlagNW));
2495 }
2496 }
2497 }
2498
2499 if (Ops.size() == 1)
2500 return Ops[0];
2501 }
2502
2503 // Skip over the add expression until we get to a multiply.
2504 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2505 ++Idx;
2506
2507 // If there are mul operands, inline them all into this expression.
2508 if (Idx < Ops.size()) {
2509 bool DeletedMul = false;
2510 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2511 // If we have a mul, expand the mul operands onto the end of the operands
2512 // list.
2513 Ops.erase(Ops.begin()+Idx);
2514 Ops.append(Mul->op_begin(), Mul->op_end());
2515 DeletedMul = true;
2516 }
2517
2518 // If we deleted at least one mul, we added operands to the end of the list,
2519 // and they are not necessarily sorted. Recurse to resort and resimplify
2520 // any operands we just acquired.
2521 if (DeletedMul)
2522 return getMulExpr(Ops);
2523 }
2524
2525 // If there are any add recurrences in the operands list, see if any other
2526 // added values are loop invariant. If so, we can fold them into the
2527 // recurrence.
2528 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2529 ++Idx;
2530
2531 // Scan over all recurrences, trying to fold loop invariants into them.
2532 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2533 // Scan all of the other operands to this mul and add them to the vector if
2534 // they are loop invariant w.r.t. the recurrence.
2535 SmallVector<const SCEV *, 8> LIOps;
2536 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2537 const Loop *AddRecLoop = AddRec->getLoop();
2538 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2539 if (isLoopInvariant(Ops[i], AddRecLoop)) {
2540 LIOps.push_back(Ops[i]);
2541 Ops.erase(Ops.begin()+i);
2542 --i; --e;
2543 }
2544
2545 // If we found some loop invariants, fold them into the recurrence.
2546 if (!LIOps.empty()) {
2547 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
2548 SmallVector<const SCEV *, 4> NewOps;
2549 NewOps.reserve(AddRec->getNumOperands());
2550 const SCEV *Scale = getMulExpr(LIOps);
2551 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2552 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2553
2554 // Build the new addrec. Propagate the NUW and NSW flags if both the
2555 // outer mul and the inner addrec are guaranteed to have no overflow.
2556 //
2557 // No self-wrap cannot be guaranteed after changing the step size, but
2558 // will be inferred if either NUW or NSW is true.
2559 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2560 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2561
2562 // If all of the other operands were loop invariant, we are done.
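// For example, x * y * {S,+,T}<%L> with x and y invariant in %L folds
// completely into {(x*y*S),+,(x*y*T)}<%L>.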
2563 if (Ops.size() == 1) return NewRec;
2564
2565 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2566 for (unsigned i = 0;; ++i)
2567 if (Ops[i] == AddRec) {
2568 Ops[i] = NewRec;
2569 break;
2570 }
2571 return getMulExpr(Ops);
2572 }
2573
2574 // Okay, if there weren't any loop invariants to be folded, check to see if
2575 // there are multiple AddRec's with the same loop induction variable being
2576 // multiplied together. If so, we can fold them.
2577
2578 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2579 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2580 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2581 // ]]],+,...up to x=2n}.
2582 // Note that the arguments to choose() are always integers with values
2583 // known at compile time, never SCEV objects.
2584 //
2585 // The implementation avoids pointless extra computations when the two
2586 // addrec's are of different length (mathematically, it's equivalent to
2587 // an infinite stream of zeros on the right).
2588 bool OpsModified = false;
2589 for (unsigned OtherIdx = Idx+1;
2590 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2591 ++OtherIdx) {
2592 const SCEVAddRecExpr *OtherAddRec =
2593 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2594 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
2595 continue;
2596
2597 bool Overflow = false;
2598 Type *Ty = AddRec->getType();
2599 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2600 SmallVector<const SCEV*, 7> AddRecOps;
2601 for (int x = 0, xe = AddRec->getNumOperands() +
2602 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
2603 const SCEV *Term = getZero(Ty);
2604 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2605 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2606 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2607 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2608 z < ze && !Overflow; ++z) {
2609 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2610 uint64_t Coeff;
2611 if (LargerThan64Bits)
2612 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2613 else
2614 Coeff = Coeff1*Coeff2;
2615 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2616 const SCEV *Term1 = AddRec->getOperand(y-z);
2617 const SCEV *Term2 = OtherAddRec->getOperand(z);
2618 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2619 }
2620 }
2621 AddRecOps.push_back(Term);
2622 }
2623 if (!Overflow) {
2624 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
2625 SCEV::FlagAnyWrap);
2626 if (Ops.size() == 2) return NewAddRec;
2627 Ops[Idx] = NewAddRec;
2628 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2629 OpsModified = true;
2630 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
2631 if (!AddRec)
2632 break;
2633 }
2634 }
2635 if (OpsModified)
2636 return getMulExpr(Ops);
2637
2638 // Otherwise couldn't fold anything into this recurrence. Move onto the
2639 // next one.
2640 }
2641
2642 // Okay, it looks like we really DO need a mul expr. Check to see if we
2643 // already have one, otherwise create a new one.
2644 FoldingSetNodeID ID;
2645 ID.AddInteger(scMulExpr);
2646 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2647 ID.AddPointer(Ops[i]);
2648 void *IP = nullptr;
2649 SCEVMulExpr *S =
2650 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2651 if (!S) {
2652 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2653 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2654 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2655 O, Ops.size());
2656 UniqueSCEVs.InsertNode(S, IP);
2657 }
2658 S->setNoWrapFlags(Flags);
2659 return S;
2660 }
2661
2662 /// getUDivExpr - Get a canonical unsigned division expression, or something
2663 /// simpler if possible.
2664 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2665 const SCEV *RHS) {
2666 assert(getEffectiveSCEVType(LHS->getType()) ==
2667 getEffectiveSCEVType(RHS->getType()) &&
2668 "SCEVUDivExpr operand types don't match!");
2669
2670 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2671 if (RHSC->getValue()->equalsInt(1))
2672 return LHS; // X udiv 1 --> x
2673 // If the denominator is zero, the result of the udiv is undefined. Don't
2674 // try to analyze it, because the resolution chosen here may differ from
2675 // the resolution chosen in other parts of the compiler.
2676 if (!RHSC->getValue()->isZero()) {
2677 // Determine if the division can be folded into the operands of
2678 // its LHS.
2679 // TODO: Generalize this to non-constants by using known-bits information.
2680 Type *Ty = LHS->getType();
2681 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
2682 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2683 // For non-power-of-two values, effectively round the value up to the
2684 // nearest power of two.
2685 if (!RHSC->getAPInt().isPowerOf2())
2686 ++MaxShiftAmt;
2687 IntegerType *ExtTy =
2688 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2689 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2690 if (const SCEVConstant *Step =
2691 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2692 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2693 const APInt &StepInt = Step->getAPInt();
2694 const APInt &DivInt = RHSC->getAPInt();
2695 if (!StepInt.urem(DivInt) &&
2696 getZeroExtendExpr(AR, ExtTy) ==
2697 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2698 getZeroExtendExpr(Step, ExtTy),
2699 AR->getLoop(), SCEV::FlagAnyWrap)) {
2700 SmallVector<const SCEV *, 4> Operands;
2701 for (const SCEV *Op : AR->operands())
2702 Operands.push_back(getUDivExpr(Op, RHS));
2703 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
2704 }
2705 // Get a canonical UDivExpr for a recurrence.
2706 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2707 // We can currently only fold X%N if X is constant.
2708 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2709 if (StartC && !DivInt.urem(StepInt) &&
2710 getZeroExtendExpr(AR, ExtTy) ==
2711 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2712 getZeroExtendExpr(Step, ExtTy),
2713 AR->getLoop(), SCEV::FlagAnyWrap)) {
2714 const APInt &StartInt = StartC->getAPInt();
2715 const APInt &StartRem = StartInt.urem(StepInt);
2716 if (StartRem != 0)
2717 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2718 AR->getLoop(), SCEV::FlagNW);
2719 }
2720 }
2721 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
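// e.g., (4*x) /u 2 can become 2*x, provided the zero-extension test
// below proves the multiply cannot have wrapped.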
2722 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 2723 SmallVector<const SCEV *, 4> Operands; 2724 for (const SCEV *Op : M->operands()) 2725 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2726 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 2727 // Find an operand that's safely divisible. 2728 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 2729 const SCEV *Op = M->getOperand(i); 2730 const SCEV *Div = getUDivExpr(Op, RHSC); 2731 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 2732 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 2733 M->op_end()); 2734 Operands[i] = Div; 2735 return getMulExpr(Operands); 2736 } 2737 } 2738 } 2739 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 2740 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 2741 SmallVector<const SCEV *, 4> Operands; 2742 for (const SCEV *Op : A->operands()) 2743 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 2744 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 2745 Operands.clear(); 2746 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2747 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 2748 if (isa<SCEVUDivExpr>(Op) || 2749 getMulExpr(Op, RHS) != A->getOperand(i)) 2750 break; 2751 Operands.push_back(Op); 2752 } 2753 if (Operands.size() == A->getNumOperands()) 2754 return getAddExpr(Operands); 2755 } 2756 } 2757 2758 // Fold if both operands are constant. 2759 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 2760 Constant *LHSCV = LHSC->getValue(); 2761 Constant *RHSCV = RHSC->getValue(); 2762 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 2763 RHSCV))); 2764 } 2765 } 2766 } 2767 2768 FoldingSetNodeID ID; 2769 ID.AddInteger(scUDivExpr); 2770 ID.AddPointer(LHS); 2771 ID.AddPointer(RHS); 2772 void *IP = nullptr; 2773 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2774 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 2775 LHS, RHS); 2776 UniqueSCEVs.InsertNode(S, IP); 2777 return S; 2778 } 2779 2780 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 2781 APInt A = C1->getAPInt().abs(); 2782 APInt B = C2->getAPInt().abs(); 2783 uint32_t ABW = A.getBitWidth(); 2784 uint32_t BBW = B.getBitWidth(); 2785 2786 if (ABW > BBW) 2787 B = B.zext(ABW); 2788 else if (ABW < BBW) 2789 A = A.zext(BBW); 2790 2791 return APIntOps::GreatestCommonDivisor(A, B); 2792 } 2793 2794 /// getUDivExactExpr - Get a canonical unsigned division expression, or 2795 /// something simpler if possible. There is no representation for an exact udiv 2796 /// in SCEV IR, but we can attempt to remove factors from the LHS and RHS. 2797 /// We can't do this when it's not exact because the udiv may be clearing bits. 2798 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 2799 const SCEV *RHS) { 2800 // TODO: we could try to find factors in all sorts of things, but for now we 2801 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 2802 // end of this file for inspiration. 2803 2804 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 2805 if (!Mul) 2806 return getUDivExpr(LHS, RHS); 2807 2808 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 2809 // If the mulexpr multiplies by a constant, then that constant must be the 2810 // first element of the mulexpr. 
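// For example, (6 * x) /u 3 has gcd(6, 3) = 3, so it is rewritten to
// (2 * x) /u 1, which getUDivExpr then folds to 2 * x.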
2811 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
2812 if (LHSCst == RHSCst) {
2813 SmallVector<const SCEV *, 2> Operands;
2814 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2815 return getMulExpr(Operands);
2816 }
2817
2818 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
2819 // that there's a factor provided by one of the other terms. We need to
2820 // check.
2821 APInt Factor = gcd(LHSCst, RHSCst);
2822 if (!Factor.isIntN(1)) {
2823 LHSCst =
2824 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
2825 RHSCst =
2826 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
2827 SmallVector<const SCEV *, 2> Operands;
2828 Operands.push_back(LHSCst);
2829 Operands.append(Mul->op_begin() + 1, Mul->op_end());
2830 LHS = getMulExpr(Operands);
2831 RHS = RHSCst;
2832 Mul = dyn_cast<SCEVMulExpr>(LHS);
2833 if (!Mul)
2834 return getUDivExactExpr(LHS, RHS);
2835 }
2836 }
2837 }
2838
2839 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
2840 if (Mul->getOperand(i) == RHS) {
2841 SmallVector<const SCEV *, 2> Operands;
2842 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
2843 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
2844 return getMulExpr(Operands);
2845 }
2846 }
2847
2848 return getUDivExpr(LHS, RHS);
2849 }
2850
2851 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2852 /// Simplify the expression as much as possible.
2853 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2854 const Loop *L,
2855 SCEV::NoWrapFlags Flags) {
2856 SmallVector<const SCEV *, 4> Operands;
2857 Operands.push_back(Start);
2858 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2859 if (StepChrec->getLoop() == L) {
2860 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2861 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2862 }
2863
2864 Operands.push_back(Step);
2865 return getAddRecExpr(Operands, L, Flags);
2866 }
2867
2868 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2869 /// Simplify the expression as much as possible.
2870 const SCEV *
2871 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2872 const Loop *L, SCEV::NoWrapFlags Flags) {
2873 if (Operands.size() == 1) return Operands[0];
2874 #ifndef NDEBUG
2875 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2876 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2877 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2878 "SCEVAddRecExpr operand types don't match!");
2879 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2880 assert(isLoopInvariant(Operands[i], L) &&
2881 "SCEVAddRecExpr operand is not loop-invariant!");
2882 #endif
2883
2884 if (Operands.back()->isZero()) {
2885 Operands.pop_back();
2886 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2887 }
2888
2889 // It's tempting to want to call getMaxBackedgeTakenCount here and
2890 // use that information to infer NUW and NSW flags. However, computing a
2891 // BE count requires calling getAddRecExpr, so we may not yet have a
2892 // meaningful BE count at this point (and if we don't, we'd be stuck
2893 // with a SCEVCouldNotCompute as the cached BE count).
2894
2895 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
2896
2897 // Canonicalize nested AddRecs by nesting them in order of loop depth.
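// For example, {{A,+,B}<%inner>,+,C}<%outer> is rewritten as
// {{A,+,C}<%outer>,+,B}<%inner>, provided every operand stays invariant
// in its respective loop.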
2898 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2899 const Loop *NestedLoop = NestedAR->getLoop();
2900 if (L->contains(NestedLoop)
2901 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
2902 : (!NestedLoop->contains(L) &&
2903 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
2904 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2905 NestedAR->op_end());
2906 Operands[0] = NestedAR->getStart();
2907 // AddRecs require their operands be loop-invariant with respect to their
2908 // loops. Don't perform this transformation if it would break this
2909 // requirement.
2910 bool AllInvariant = all_of(
2911 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
2912
2913 if (AllInvariant) {
2914 // Create a recurrence for the outer loop with the same step size.
2915 //
2916 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2917 // inner recurrence has the same property.
2918 SCEV::NoWrapFlags OuterFlags =
2919 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2920
2921 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2922 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
2923 return isLoopInvariant(Op, NestedLoop);
2924 });
2925
2926 if (AllInvariant) {
2927 // Ok, both add recurrences are valid after the transformation.
2928 //
2929 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2930 // the outer recurrence has the same property.
2931 SCEV::NoWrapFlags InnerFlags =
2932 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2933 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2934 }
2935 }
2936 // Reset Operands to its original state.
2937 Operands[0] = NestedAR;
2938 }
2939 }
2940
2941 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2942 // already have one, otherwise create a new one.
2943 FoldingSetNodeID ID;
2944 ID.AddInteger(scAddRecExpr);
2945 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2946 ID.AddPointer(Operands[i]);
2947 ID.AddPointer(L);
2948 void *IP = nullptr;
2949 SCEVAddRecExpr *S =
2950 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2951 if (!S) {
2952 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2953 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2954 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2955 O, Operands.size(), L);
2956 UniqueSCEVs.InsertNode(S, IP);
2957 }
2958 S->setNoWrapFlags(Flags);
2959 return S;
2960 }
2961
2962 const SCEV *
2963 ScalarEvolution::getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
2964 const SmallVectorImpl<const SCEV *> &IndexExprs,
2965 bool InBounds) {
2966 // getSCEV(Base)->getType() has the same address space as Base->getType()
2967 // because SCEV::getType() preserves the address space.
2968 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
2969 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
2970 // instruction to its SCEV, because the Instruction may be guarded by control
2971 // flow and the no-overflow bits may not be valid for the expression in any
2972 // context. This can be fixed similarly to how these flags are handled for
2973 // adds.
2974 SCEV::NoWrapFlags Wrap = InBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
2975
2976 const SCEV *TotalOffset = getZero(IntPtrTy);
2977 // The address space is unimportant; the first thing we do with CurTy is get
2978 // its element type.
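// For example, a GEP into %S = type { i32, [8 x i64] } with indices
// (i, 1, j) accumulates roughly i*sizeof(%S) + offsetof(%S, 1) +
// j*sizeof(i64) bytes of offset on top of the base pointer.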
  Type *CurTy = PointerType::getUnqual(PointeeType);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X smax Y smax Y  -->  X smax Y
    // X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
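    // For example, umax(0, X, Y) --> umax(X, Y): the unsigned minimum can
    // never decide the maximum, so it contributes nothing.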
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    // X umax Y umax Y  -->  X umax Y
    // X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
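  //
  // For example, with a typical 64-bit DataLayout, field 1 of { i32, i64 }
  // sits at byte offset 8 (4 bytes of i32 plus 4 bytes of padding); the
  // StructLayout query below returns exactly that allocated offset.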
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}


bool ScalarEvolution::checkValidity(const SCEV *S) const {
  // Helper class working with SCEVTraversal to figure out if a SCEV contains
  // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
  // is set iff we find such a SCEVUnknown.
  //
  struct FindInvalidSCEVUnknown {
    bool FindOne;
    FindInvalidSCEVUnknown() { FindOne = false; }
    bool follow(const SCEV *S) {
      switch (static_cast<SCEVTypes>(S->getSCEVType())) {
      case scConstant:
        return false;
      case scUnknown:
        if (!cast<SCEVUnknown>(S)->getValue())
          FindOne = true;
        return false;
      default:
        return true;
      }
    }
    bool isDone() const { return FindOne; }
  };

  FindInvalidSCEVUnknown F;
  SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
  ST.visitAll(S);

  return !F.FindOne;
}

namespace {
// Helper class working with SCEVTraversal to figure out if a SCEV contains
// a sub SCEV of scAddRecExpr type. FindAddRecurrence::FoundOne is set iff
// such a sub SCEV is found.
struct FindAddRecurrence {
  bool FoundOne;
  FindAddRecurrence() : FoundOne(false) {}

  bool follow(const SCEV *S) {
    switch (static_cast<SCEVTypes>(S->getSCEVType())) {
    case scAddRecExpr:
      FoundOne = true;
      // fall through: once found, there is no need to visit the operands.
    case scConstant:
    case scUnknown:
    case scCouldNotCompute:
      return false;
    default:
      return true;
    }
  }
  bool isDone() const { return FoundOne; }
};
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find_as(S);
  if (I != HasRecMap.end())
    return I->second;

  FindAddRecurrence F;
  SCEVTraversal<FindAddRecurrence> ST(F);
  ST.visitAll(S);
  HasRecMap.insert({S, F.FoundOne});
  return F.FoundOne;
}

/// getSCEVValues - Return the set of Values mapped to S in ExprValueMap.
SetVector<Value *> *ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE));
  }
#endif
  return &SI->second;
}

/// eraseValueFromMap - Erase Value from ValueExprMap and ExprValueMap.
/// Unless ValueExprMap.erase(V) is paired with forgetMemoizedResults(S),
/// eraseValueFromMap should be used instead, so that whenever V->S is removed
/// from ValueExprMap, V is also removed from the set ExprValueMap[S].
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    SetVector<Value *> *SV = getSCEVValues(S);
    // Remove V from the set of ExprValueMap[S]
    if (SV)
      SV->remove(V);
    ValueExprMap.erase(V);
  }
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S was inserted into
    // ValueExprMap before inserting S->V into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second)
      ExprValueMap[S].insert(V);
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    forgetMemoizedResults(S);
    ValueExprMap.erase(I);
  }
  return nullptr;
}

/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRange(RHS).getSignedMin().isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
}

/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended, it is
/// zero extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended, it is
/// sign extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is zero
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is sign
/// extended. The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}

/// getPointerBase - Transitively follow the chain of pointer-type operands
/// until reaching a SCEV that does not have a single pointer operand. This
/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
/// but corner cases do exist.
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}

/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

/// forgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMap if they reference SymName. This is used during PHI
/// resolution.
void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        forgetMemoizedResults(Old);
        ValueExprMap.erase(It);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only allow AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), Valid(true) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow unknowns that are invariant in this loop.
    if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }
  bool isValid() { return Valid; }

private:
  const Loop *L;
  bool Valid;
};
} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  typedef OverflowingBinaryOperator OBO;
  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

namespace {
/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW;
  bool IsNUW;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)),
        RHS(Op->getOperand(1)), IsNSW(false), IsNUW(false), Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
        Op(nullptr) {}
};
}


/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.
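  //
  // A couple of the mappings below, sketched in IR terms:
  //   %a = xor i8 %x, -128   ; xor with the sign bit is really %x + (-128)
  //   %b = lshr i8 %x, 3     ; becomes the unsigned divide %x /u 8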

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignBit())
        return BinaryOp(Instruction::Add, Op->getOperand(0),
                        Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  default:
    break;
  }

  return None;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (BEValueV && StartValueV) {
    // While we are analyzing this PHI node, handle its value symbolically.
    const SCEV *SymbolicName = getUnknown(PN);
    assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
           "PHI node already processed?");
    ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

    // Using this symbolic name for the PHI, analyze the value coming around
    // the back-edge.
    const SCEV *BEValue = getSCEV(BEValueV);

    // NOTE: If BEValue is loop invariant, we know that the PHI node just
    // has a special value for the first iteration of the loop.

    // If the value coming around the backedge is an add with the symbolic
    // value we just inserted, then we found a simple induction variable!
    if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
      // If there is a single occurrence of the symbolic value, replace it
      // with a recurrence.
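      //
      // For instance, for the canonical loop
      //   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
      //   %iv.next = add nuw nsw i64 %iv, 1
      // BEValue is (SymbolicName + 1); removing the symbolic operand leaves
      // an accumulator of 1, which yields the recurrence {0,+,1}<%loop>.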
      unsigned FoundIndex = Add->getNumOperands();
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (Add->getOperand(i) == SymbolicName)
          if (FoundIndex == e) {
            FoundIndex = i;
            break;
          }

      if (FoundIndex != Add->getNumOperands()) {
        // Create an add with everything but the specified operand.
        SmallVector<const SCEV *, 8> Ops;
        for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
          if (i != FoundIndex)
            Ops.push_back(Add->getOperand(i));
        const SCEV *Accum = getAddExpr(Ops);

        // This is not a valid addrec if the step amount is varying each
        // loop iteration, but is not itself an addrec in this loop.
        if (isLoopInvariant(Accum, L) ||
            (isa<SCEVAddRecExpr>(Accum) &&
             cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
          SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

          // If the increment doesn't overflow, then neither the addrec nor
          // the post-increment will overflow.
          if (auto BO = MatchBinaryOp(BEValueV)) {
            if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
              if (BO->IsNUW)
                Flags = setFlags(Flags, SCEV::FlagNUW);
              if (BO->IsNSW)
                Flags = setFlags(Flags, SCEV::FlagNSW);
            }
          } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
            // If the increment is an inbounds GEP, then we know the address
            // space cannot be wrapped around. We cannot make any guarantee
            // about signed or unsigned overflow because pointers are
            // unsigned but we may have a negative index from the base
            // pointer. We can guarantee that no unsigned wrap occurs if the
            // indices form a positive value.
            if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
              Flags = setFlags(Flags, SCEV::FlagNW);

              const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
              if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
                Flags = setFlags(Flags, SCEV::FlagNUW);
            }

            // We cannot transfer nuw and nsw flags from subtraction
            // operations -- sub nuw X, Y is not the same as add nuw X, -Y
            // for instance.
          }

          const SCEV *StartVal = getSCEV(StartValueV);
          const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

          // Since the no-wrap flags are on the increment, they apply to the
          // post-incremented value as well.
          if (isLoopInvariant(Accum, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

          // Okay, for the entire analysis of this edge we assumed the PHI
          // to be symbolic. We now need to go back and purge all of the
          // entries for the scalars that use the symbolic expression.
          forgetSymbolicName(PN, SymbolicName);
          ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
          return PHISCEV;
        }
      }
    } else {
      // Otherwise, this could be a loop like this:
      //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
      // In this case, j = {1,+,1} and BEValue is j.
      // Because the other in-value of i (0) fits the evolution of BEValue,
      // i really is an addrec evolution.
      //
      // We can generalize this by saying that i is the shifted value of
      // BEValue by one iteration:
      //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
      const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
      const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
      if (Shifted != getCouldNotCompute() &&
          Start != getCouldNotCompute()) {
        const SCEV *StartVal = getSCEV(StartValueV);
        if (Start == StartVal) {
          // Okay, for the entire analysis of this edge we assumed the PHI
          // to be symbolic. We now need to go back and purge all of the
          // entries for the scalars that use the symbolic expression.
          forgetSymbolicName(PN, SymbolicName);
          ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
          return Shifted;
        }
      }
    }

    // Remove the temporary PHI node SCEV that has been inserted while
    // intending to create an AddRecExpr for this PHI node. We cannot keep
    // this temporary, as it would prevent later (possibly simpler) SCEV
    // expressions from being added to the ValueExprMap.
    ValueExprMap.erase(PN);
  }

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences on the loop that BB is in, or on some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  if (PN->getNumIncomingValues() == 2) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}

/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP->getSourceElementType(),
                    getSCEV(GEP->getPointerOperand()),
                    IndexExprs, GEP->isInBounds());
}

/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration). It is, at the same time,
/// the minimum number of times S is divisible by 2. For example, given {4,+,8}
/// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
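    //
    // For example, for {4,+,8} (the example from the doc comment above) this
    // is min(tz(4), tz(8)) = min(2, 3) = 2: every value the recurrence takes
    // is divisible by 4.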
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC,
                     nullptr, &DT);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}

/// GetRangeFromMetadata - Helper method to assign a range to V from
/// metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// getRange - Determine the range for a particular SCEV. If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
///
ConstantRange
ScalarEvolution::getRange(const SCEV *S,
                          ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known trailing zeros, the maximum value will have those
  // trailing zeros as well.
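  //
  // For instance, with BitWidth == 8 and TZ == 2, the unsigned hint below
  // clamps the range to [0, 0xFD), i.e. the largest value still considered
  // possible is 0xFC, which keeps the two known-zero low bits clear.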
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRange(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRange(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRange(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRange(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRange(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRange(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRange(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRange(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRange(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRange(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRange(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRange(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRange(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)));

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
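    //
    // For example, {1,+,2}<nsw> only takes the values 1, 3, 5, ... and is
    // clamped below to the non-negative range; dually, an all-non-positive
    // recurrence such as {-1,+,-2}<nsw> stays non-positive.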
4516 if (AddRec->hasNoSignedWrap()) { 4517 bool AllNonNeg = true; 4518 bool AllNonPos = true; 4519 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4520 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 4521 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 4522 } 4523 if (AllNonNeg) 4524 ConservativeResult = ConservativeResult.intersectWith( 4525 ConstantRange(APInt(BitWidth, 0), 4526 APInt::getSignedMinValue(BitWidth))); 4527 else if (AllNonPos) 4528 ConservativeResult = ConservativeResult.intersectWith( 4529 ConstantRange(APInt::getSignedMinValue(BitWidth), 4530 APInt(BitWidth, 1))); 4531 } 4532 4533 // TODO: non-affine addrec 4534 if (AddRec->isAffine()) { 4535 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 4536 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 4537 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 4538 auto RangeFromAffine = getRangeForAffineAR( 4539 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4540 BitWidth); 4541 if (!RangeFromAffine.isFullSet()) 4542 ConservativeResult = 4543 ConservativeResult.intersectWith(RangeFromAffine); 4544 4545 auto RangeFromFactoring = getRangeViaFactoring( 4546 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 4547 BitWidth); 4548 if (!RangeFromFactoring.isFullSet()) 4549 ConservativeResult = 4550 ConservativeResult.intersectWith(RangeFromFactoring); 4551 } 4552 } 4553 4554 return setRange(AddRec, SignHint, ConservativeResult); 4555 } 4556 4557 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 4558 // Check if the IR explicitly contains !range metadata. 4559 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 4560 if (MDRange.hasValue()) 4561 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 4562 4563 // Split here to avoid paying the compile-time cost of calling both 4564 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 4565 // if needed. 4566 const DataLayout &DL = getDataLayout(); 4567 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 4568 // For a SCEVUnknown, ask ValueTracking. 4569 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 4570 computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT); 4571 if (Ones != ~Zeros + 1) 4572 ConservativeResult = 4573 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); 4574 } else { 4575 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 4576 "generalize as needed!"); 4577 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 4578 if (NS > 1) 4579 ConservativeResult = ConservativeResult.intersectWith( 4580 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 4581 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 4582 } 4583 4584 return setRange(U, SignHint, ConservativeResult); 4585 } 4586 4587 return setRange(S, SignHint, ConservativeResult); 4588 } 4589 4590 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 4591 const SCEV *Step, 4592 const SCEV *MaxBECount, 4593 unsigned BitWidth) { 4594 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 4595 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 4596 "Precondition!"); 4597 4598 ConstantRange Result(BitWidth, /* isFullSet = */ true); 4599 4600 // Check for overflow. This must be done with ConstantRange arithmetic 4601 // because we could be called from within the ScalarEvolution overflow 4602 // checking code. 
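// The approach: evaluate Start + MaxBECount * Step at the original width and
// again at 2 * BitWidth + 1 bits, where the multiply and add cannot wrap. If
// the two computations agree, the narrow arithmetic did not overflow, and
// min/max of the Start and End ranges bound the recurrence.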
4603 4604 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 4605 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 4606 ConstantRange ZExtMaxBECountRange = 4607 MaxBECountRange.zextOrTrunc(BitWidth * 2 + 1); 4608 4609 ConstantRange StepSRange = getSignedRange(Step); 4610 ConstantRange SExtStepSRange = StepSRange.sextOrTrunc(BitWidth * 2 + 1); 4611 4612 ConstantRange StartURange = getUnsignedRange(Start); 4613 ConstantRange EndURange = 4614 StartURange.add(MaxBECountRange.multiply(StepSRange)); 4615 4616 // Check for unsigned overflow. 4617 ConstantRange ZExtStartURange = StartURange.zextOrTrunc(BitWidth * 2 + 1); 4618 ConstantRange ZExtEndURange = EndURange.zextOrTrunc(BitWidth * 2 + 1); 4619 if (ZExtStartURange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4620 ZExtEndURange) { 4621 APInt Min = APIntOps::umin(StartURange.getUnsignedMin(), 4622 EndURange.getUnsignedMin()); 4623 APInt Max = APIntOps::umax(StartURange.getUnsignedMax(), 4624 EndURange.getUnsignedMax()); 4625 bool IsFullRange = Min.isMinValue() && Max.isMaxValue(); 4626 if (!IsFullRange) 4627 Result = 4628 Result.intersectWith(ConstantRange(Min, Max + 1)); 4629 } 4630 4631 ConstantRange StartSRange = getSignedRange(Start); 4632 ConstantRange EndSRange = 4633 StartSRange.add(MaxBECountRange.multiply(StepSRange)); 4634 4635 // Check for signed overflow. This must be done with ConstantRange 4636 // arithmetic because we could be called from within the ScalarEvolution 4637 // overflow checking code. 4638 ConstantRange SExtStartSRange = StartSRange.sextOrTrunc(BitWidth * 2 + 1); 4639 ConstantRange SExtEndSRange = EndSRange.sextOrTrunc(BitWidth * 2 + 1); 4640 if (SExtStartSRange.add(ZExtMaxBECountRange.multiply(SExtStepSRange)) == 4641 SExtEndSRange) { 4642 APInt Min = 4643 APIntOps::smin(StartSRange.getSignedMin(), EndSRange.getSignedMin()); 4644 APInt Max = 4645 APIntOps::smax(StartSRange.getSignedMax(), EndSRange.getSignedMax()); 4646 bool IsFullRange = Min.isMinSignedValue() && Max.isMaxSignedValue(); 4647 if (!IsFullRange) 4648 Result = 4649 Result.intersectWith(ConstantRange(Min, Max + 1)); 4650 } 4651 4652 return Result; 4653 } 4654 4655 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 4656 const SCEV *Step, 4657 const SCEV *MaxBECount, 4658 unsigned BitWidth) { 4659 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 4660 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 4661 4662 struct SelectPattern { 4663 Value *Condition = nullptr; 4664 APInt TrueValue; 4665 APInt FalseValue; 4666 4667 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 4668 const SCEV *S) { 4669 Optional<unsigned> CastOp; 4670 APInt Offset(BitWidth, 0); 4671 4672 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 4673 "Should be!"); 4674 4675 // Peel off a constant offset: 4676 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 4677 // In the future we could consider being smarter here and handle 4678 // {Start+Step,+,Step} too. 
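// For example, S = (4 + %sel), with %sel a hypothetical select such as
// (select %c, i8 2, i8 7), decomposes into Offset = 4 and S = %sel; the
// offset is added back onto both arms at the end of this constructor.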
4679 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 4680 return; 4681 4682 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 4683 S = SA->getOperand(1); 4684 } 4685 4686 // Peel off a cast operation 4687 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 4688 CastOp = SCast->getSCEVType(); 4689 S = SCast->getOperand(); 4690 } 4691 4692 using namespace llvm::PatternMatch; 4693 4694 auto *SU = dyn_cast<SCEVUnknown>(S); 4695 const APInt *TrueVal, *FalseVal; 4696 if (!SU || 4697 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 4698 m_APInt(FalseVal)))) { 4699 Condition = nullptr; 4700 return; 4701 } 4702 4703 TrueValue = *TrueVal; 4704 FalseValue = *FalseVal; 4705 4706 // Re-apply the cast we peeled off earlier 4707 if (CastOp.hasValue()) 4708 switch (*CastOp) { 4709 default: 4710 llvm_unreachable("Unknown SCEV cast type!"); 4711 4712 case scTruncate: 4713 TrueValue = TrueValue.trunc(BitWidth); 4714 FalseValue = FalseValue.trunc(BitWidth); 4715 break; 4716 case scZeroExtend: 4717 TrueValue = TrueValue.zext(BitWidth); 4718 FalseValue = FalseValue.zext(BitWidth); 4719 break; 4720 case scSignExtend: 4721 TrueValue = TrueValue.sext(BitWidth); 4722 FalseValue = FalseValue.sext(BitWidth); 4723 break; 4724 } 4725 4726 // Re-apply the constant offset we peeled off earlier 4727 TrueValue += Offset; 4728 FalseValue += Offset; 4729 } 4730 4731 bool isRecognized() { return Condition != nullptr; } 4732 }; 4733 4734 SelectPattern StartPattern(*this, BitWidth, Start); 4735 if (!StartPattern.isRecognized()) 4736 return ConstantRange(BitWidth, /* isFullSet = */ true); 4737 4738 SelectPattern StepPattern(*this, BitWidth, Step); 4739 if (!StepPattern.isRecognized()) 4740 return ConstantRange(BitWidth, /* isFullSet = */ true); 4741 4742 if (StartPattern.Condition != StepPattern.Condition) { 4743 // We don't handle this case today; but we could, by considering four 4744 // possibilities below instead of two. I'm not sure if there are cases where 4745 // that will help over what getRange already does, though. 4746 return ConstantRange(BitWidth, /* isFullSet = */ true); 4747 } 4748 4749 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 4750 // construct arbitrary general SCEV expressions here. This function is called 4751 // from deep in the call stack, and calling getSCEV (on a sext instruction, 4752 // say) can end up caching a suboptimal value. 4753 4754 // FIXME: without the explicit `this` receiver below, MSVC errors out with 4755 // C2352 and C2512 (otherwise it isn't needed). 4756 4757 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 4758 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 4759 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 4760 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 4761 4762 ConstantRange TrueRange = 4763 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 4764 ConstantRange FalseRange = 4765 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 4766 4767 return TrueRange.unionWith(FalseRange); 4768 } 4769 4770 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 4771 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 4772 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 4773 4774 // Return early if there are no flags to propagate to the SCEV. 
4775 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4776 if (BinOp->hasNoUnsignedWrap()) 4777 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 4778 if (BinOp->hasNoSignedWrap()) 4779 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 4780 if (Flags == SCEV::FlagAnyWrap) 4781 return SCEV::FlagAnyWrap; 4782 4783 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 4784 } 4785 4786 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 4787 // Here we check that I is in the header of the innermost loop containing I, 4788 // since we only deal with instructions in the loop header. The actual loop we 4789 // need to check later will come from an add recurrence, but getting that 4790 // requires computing the SCEV of the operands, which can be expensive. This 4791 // check we can do cheaply to rule out some cases early. 4792 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 4793 if (InnermostContainingLoop == nullptr || 4794 InnermostContainingLoop->getHeader() != I->getParent()) 4795 return false; 4796 4797 // Only proceed if we can prove that I does not yield poison. 4798 if (!isKnownNotFullPoison(I)) return false; 4799 4800 // At this point we know that if I is executed, then it does not wrap 4801 // according to at least one of NSW or NUW. If I is not executed, then we do 4802 // not know if the calculation that I represents would wrap. Multiple 4803 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 4804 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 4805 // derived from other instructions that map to the same SCEV. We cannot make 4806 // that guarantee for cases where I is not executed. So we need to find the 4807 // loop that I is considered in relation to and prove that I is executed for 4808 // every iteration of that loop. That implies that the value that I 4809 // calculates does not wrap anywhere in the loop, so then we can apply the 4810 // flags to the SCEV. 4811 // 4812 // We check isLoopInvariant to disambiguate in case we are adding recurrences 4813 // from different loops, so that we know which loop to prove that I is 4814 // executed in. 4815 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 4816 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 4817 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 4818 bool AllOtherOpsLoopInvariant = true; 4819 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 4820 ++OtherOpIndex) { 4821 if (OtherOpIndex != OpIndex) { 4822 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 4823 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 4824 AllOtherOpsLoopInvariant = false; 4825 break; 4826 } 4827 } 4828 } 4829 if (AllOtherOpsLoopInvariant && 4830 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 4831 return true; 4832 } 4833 } 4834 return false; 4835 } 4836 4837 /// createSCEV - We know that there is no SCEV for the specified value. Analyze 4838 /// the expression. 4839 /// 4840 const SCEV *ScalarEvolution::createSCEV(Value *V) { 4841 if (!isSCEVable(V->getType())) 4842 return getUnknown(V); 4843 4844 if (Instruction *I = dyn_cast<Instruction>(V)) { 4845 // Don't attempt to analyze instructions in blocks that aren't 4846 // reachable. Such instructions don't matter, and they aren't required 4847 // to obey basic rules for definitions dominating uses which this 4848 // analysis depends on. 
4849 if (!DT.isReachableFromEntry(I->getParent())) 4850 return getUnknown(V); 4851 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 4852 return getConstant(CI); 4853 else if (isa<ConstantPointerNull>(V)) 4854 return getZero(V->getType()); 4855 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 4856 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 4857 else if (!isa<ConstantExpr>(V)) 4858 return getUnknown(V); 4859 4860 Operator *U = cast<Operator>(V); 4861 if (auto BO = MatchBinaryOp(U)) { 4862 switch (BO->Opcode) { 4863 case Instruction::Add: { 4864 // The simple thing to do would be to just call getSCEV on both operands 4865 // and call getAddExpr with the result. However if we're looking at a 4866 // bunch of things all added together, this can be quite inefficient, 4867 // because it leads to N-1 getAddExpr calls for N ultimate operands. 4868 // Instead, gather up all the operands and make a single getAddExpr call. 4869 // LLVM IR canonical form means we need only traverse the left operands. 4870 SmallVector<const SCEV *, 4> AddOps; 4871 do { 4872 if (BO->Op) { 4873 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 4874 AddOps.push_back(OpSCEV); 4875 break; 4876 } 4877 4878 // If a NUW or NSW flag can be applied to the SCEV for this 4879 // addition, then compute the SCEV for this addition by itself 4880 // with a separate call to getAddExpr. We need to do that 4881 // instead of pushing the operands of the addition onto AddOps, 4882 // since the flags are only known to apply to this particular 4883 // addition - they may not apply to other additions that can be 4884 // formed with operands from AddOps. 4885 const SCEV *RHS = getSCEV(BO->RHS); 4886 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 4887 if (Flags != SCEV::FlagAnyWrap) { 4888 const SCEV *LHS = getSCEV(BO->LHS); 4889 if (BO->Opcode == Instruction::Sub) 4890 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 4891 else 4892 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 4893 break; 4894 } 4895 } 4896 4897 if (BO->Opcode == Instruction::Sub) 4898 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 4899 else 4900 AddOps.push_back(getSCEV(BO->RHS)); 4901 4902 auto NewBO = MatchBinaryOp(BO->LHS); 4903 if (!NewBO || (NewBO->Opcode != Instruction::Add && 4904 NewBO->Opcode != Instruction::Sub)) { 4905 AddOps.push_back(getSCEV(BO->LHS)); 4906 break; 4907 } 4908 BO = NewBO; 4909 } while (true); 4910 4911 return getAddExpr(AddOps); 4912 } 4913 4914 case Instruction::Mul: { 4915 SmallVector<const SCEV *, 4> MulOps; 4916 do { 4917 if (BO->Op) { 4918 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 4919 MulOps.push_back(OpSCEV); 4920 break; 4921 } 4922 4923 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 4924 if (Flags != SCEV::FlagAnyWrap) { 4925 MulOps.push_back( 4926 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 4927 break; 4928 } 4929 } 4930 4931 MulOps.push_back(getSCEV(BO->RHS)); 4932 auto NewBO = MatchBinaryOp(BO->LHS); 4933 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 4934 MulOps.push_back(getSCEV(BO->LHS)); 4935 break; 4936 } 4937 BO = NewBO; 4938 } while (true); 4939 4940 return getMulExpr(MulOps); 4941 } 4942 case Instruction::UDiv: 4943 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 4944 case Instruction::Sub: { 4945 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 4946 if (BO->Op) 4947 Flags = getNoWrapFlagsFromUB(BO->Op); 4948 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 4949 } 4950 case Instruction::And: 4951 // For an expression like x&255 
that merely masks off the high bits, 4952 // use zext(trunc(x)) as the SCEV expression. 4953 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 4954 if (CI->isNullValue()) 4955 return getSCEV(BO->RHS); 4956 if (CI->isAllOnesValue()) 4957 return getSCEV(BO->LHS); 4958 const APInt &A = CI->getValue(); 4959 4960 // Instcombine's ShrinkDemandedConstant may strip bits out of 4961 // constants, obscuring what would otherwise be a low-bits mask. 4962 // Use computeKnownBits to compute what ShrinkDemandedConstant 4963 // knew about to reconstruct a low-bits mask value. 4964 unsigned LZ = A.countLeadingZeros(); 4965 unsigned TZ = A.countTrailingZeros(); 4966 unsigned BitWidth = A.getBitWidth(); 4967 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 4968 computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(), 4969 0, &AC, nullptr, &DT); 4970 4971 APInt EffectiveMask = 4972 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 4973 if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) { 4974 const SCEV *MulCount = getConstant(ConstantInt::get( 4975 getContext(), APInt::getOneBitSet(BitWidth, TZ))); 4976 return getMulExpr( 4977 getZeroExtendExpr( 4978 getTruncateExpr( 4979 getUDivExactExpr(getSCEV(BO->LHS), MulCount), 4980 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 4981 BO->LHS->getType()), 4982 MulCount); 4983 } 4984 } 4985 break; 4986 4987 case Instruction::Or: 4988 // If the RHS of the Or is a constant, we may have something like: 4989 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 4990 // optimizations will transparently handle this case. 4991 // 4992 // In order for this transformation to be safe, the LHS must be of the 4993 // form X*(2^n) and the Or constant must be less than 2^n. 4994 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 4995 const SCEV *LHS = getSCEV(BO->LHS); 4996 const APInt &CIVal = CI->getValue(); 4997 if (GetMinTrailingZeros(LHS) >= 4998 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 4999 // Build a plain add SCEV. 5000 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 5001 // If the LHS of the add was an addrec and it has no-wrap flags, 5002 // transfer the no-wrap flags, since an or won't introduce a wrap. 5003 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 5004 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 5005 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 5006 OldAR->getNoWrapFlags()); 5007 } 5008 return S; 5009 } 5010 } 5011 break; 5012 5013 case Instruction::Xor: 5014 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 5015 // If the RHS of xor is -1, then this is a not operation. 5016 if (CI->isAllOnesValue()) 5017 return getNotSCEV(getSCEV(BO->LHS)); 5018 5019 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 5020 // This is a variant of the check for xor with -1, and it handles 5021 // the case where instcombine has trimmed non-demanded bits out 5022 // of an xor with -1. 5023 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 5024 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 5025 if (LBO->getOpcode() == Instruction::And && 5026 LCI->getValue() == CI->getValue()) 5027 if (const SCEVZeroExtendExpr *Z = 5028 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 5029 Type *UTy = BO->LHS->getType(); 5030 const SCEV *Z0 = Z->getOperand(); 5031 Type *Z0Ty = Z0->getType(); 5032 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 5033 5034 // If C is a low-bits mask, the zero extend is serving to 5035 // mask off the high bits. 
Complement the operand and 5036 // re-apply the zext. 5037 if (APIntOps::isMask(Z0TySize, CI->getValue())) 5038 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 5039 5040 // If C is a single bit, it may be in the sign-bit position 5041 // before the zero-extend. In this case, represent the xor 5042 // using an add, which is equivalent, and re-apply the zext. 5043 APInt Trunc = CI->getValue().trunc(Z0TySize); 5044 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 5045 Trunc.isSignBit()) 5046 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 5047 UTy); 5048 } 5049 } 5050 break; 5051 5052 case Instruction::Shl: 5053 // Turn shift left of a constant amount into a multiply. 5054 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 5055 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 5056 5057 // If the shift count is not less than the bitwidth, the result of 5058 // the shift is undefined. Don't try to analyze it, because the 5059 // resolution chosen here may differ from the resolution chosen in 5060 // other parts of the compiler. 5061 if (SA->getValue().uge(BitWidth)) 5062 break; 5063 5064 // It is currently not resolved how to interpret NSW for left 5065 // shift by BitWidth - 1, so we avoid applying flags in that 5066 // case. Remove this check (or this comment) once the situation 5067 // is resolved. See 5068 // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html 5069 // and http://reviews.llvm.org/D8890 . 5070 auto Flags = SCEV::FlagAnyWrap; 5071 if (BO->Op && SA->getValue().ult(BitWidth - 1)) 5072 Flags = getNoWrapFlagsFromUB(BO->Op); 5073 5074 Constant *X = ConstantInt::get(getContext(), 5075 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 5076 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags); 5077 } 5078 break; 5079 5080 case Instruction::AShr: 5081 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. 5082 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) 5083 if (Operator *L = dyn_cast<Operator>(BO->LHS)) 5084 if (L->getOpcode() == Instruction::Shl && 5085 L->getOperand(1) == BO->RHS) { 5086 uint64_t BitWidth = getTypeSizeInBits(BO->LHS->getType()); 5087 5088 // If the shift count is not less than the bitwidth, the result of 5089 // the shift is undefined. Don't try to analyze it, because the 5090 // resolution chosen here may differ from the resolution chosen in 5091 // other parts of the compiler. 5092 if (CI->getValue().uge(BitWidth)) 5093 break; 5094 5095 uint64_t Amt = BitWidth - CI->getZExtValue(); 5096 if (Amt == BitWidth) 5097 return getSCEV(L->getOperand(0)); // shift by zero --> noop 5098 return getSignExtendExpr( 5099 getTruncateExpr(getSCEV(L->getOperand(0)), 5100 IntegerType::get(getContext(), Amt)), 5101 BO->LHS->getType()); 5102 } 5103 break; 5104 } 5105 } 5106 5107 switch (U->getOpcode()) { 5108 case Instruction::Trunc: 5109 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 5110 5111 case Instruction::ZExt: 5112 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 5113 5114 case Instruction::SExt: 5115 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 5116 5117 case Instruction::BitCast: 5118 // BitCasts are no-op casts so we just eliminate the cast. 
5119 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
5120 return getSCEV(U->getOperand(0));
5121 break;
5122
5123 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
5124 // lead to pointer expressions which cannot safely be expanded to GEPs,
5125 // because ScalarEvolution doesn't respect the GEP aliasing rules when
5126 // simplifying integer expressions.
5127
5128 case Instruction::GetElementPtr:
5129 return createNodeForGEP(cast<GEPOperator>(U));
5130
5131 case Instruction::PHI:
5132 return createNodeForPHI(cast<PHINode>(U));
5133
5134 case Instruction::Select:
5135 // U can also be a select constant expr, which we let fall through. Since
5136 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
5137 // constant expressions cannot have instructions as operands, we'd have
5138 // returned getUnknown for a select constant expression anyway.
5139 if (isa<Instruction>(U))
5140 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
5141 U->getOperand(1), U->getOperand(2));
5142 }
5143
5144 return getUnknown(V);
5145 }
5146
5147
5148
5149 //===----------------------------------------------------------------------===//
5150 // Iteration Count Computation Code
5151 //
5152
5153 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L) {
5154 if (BasicBlock *ExitingBB = L->getExitingBlock())
5155 return getSmallConstantTripCount(L, ExitingBB);
5156
5157 // No trip count information for multiple exits.
5158 return 0;
5159 }
5160
5161 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
5162 /// normal unsigned value. Returns 0 if the trip count is unknown or not
5163 /// constant. Will also return 0 if the maximum trip count is very large (>=
5164 /// 2^32).
5165 ///
5166 /// This "trip count" assumes that control exits via ExitingBlock. More
5167 /// precisely, it is the number of times that control may reach ExitingBlock
5168 /// before taking the branch. For loops with multiple exits, it may not be the
5169 /// number of times that the loop header executes because the loop may exit
5170 /// prematurely via another branch.
5171 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
5172 BasicBlock *ExitingBlock) {
5173 assert(ExitingBlock && "Must pass a non-null exiting block!");
5174 assert(L->isLoopExiting(ExitingBlock) &&
5175 "Exiting block must actually branch out of the loop!");
5176 const SCEVConstant *ExitCount =
5177 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
5178 if (!ExitCount)
5179 return 0;
5180
5181 ConstantInt *ExitConst = ExitCount->getValue();
5182
5183 // Guard against huge trip counts.
5184 if (ExitConst->getValue().getActiveBits() > 32)
5185 return 0;
5186
5187 // In case of integer overflow, this returns 0, which is correct.
5188 return ((unsigned)ExitConst->getZExtValue()) + 1;
5189 }
5190
5191 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L) {
5192 if (BasicBlock *ExitingBB = L->getExitingBlock())
5193 return getSmallConstantTripMultiple(L, ExitingBB);
5194
5195 // No trip multiple information for multiple exits.
5196 return 0;
5197 }
5198
5199 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
5200 /// trip count of this loop as a normal unsigned value, if possible. This
5201 /// means that the actual trip count is always a multiple of the returned
5202 /// value (don't forget the trip count could very well be zero as well!).
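/// For example, if the trip count is known to be a multiple of 4 (the loop
/// may run 0, 4, 8, ... iterations), the returned value is 4.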
5203 ///
5204 /// Returns 1 if the trip count is unknown or not guaranteed to be a
5205 /// multiple of a constant (which is also the case if the trip count is simply
5206 /// constant; use getSmallConstantTripCount for that case). Will also return 1
5207 /// if the trip count is very large (>= 2^32).
5208 ///
5209 /// As explained in the comments for getSmallConstantTripCount, this assumes
5210 /// that control exits the loop via ExitingBlock.
5211 unsigned
5212 ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
5213 BasicBlock *ExitingBlock) {
5214 assert(ExitingBlock && "Must pass a non-null exiting block!");
5215 assert(L->isLoopExiting(ExitingBlock) &&
5216 "Exiting block must actually branch out of the loop!");
5217 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
5218 if (ExitCount == getCouldNotCompute())
5219 return 1;
5220
5221 // Get the trip count from the BE count by adding 1.
5222 const SCEV *TCMul = getAddExpr(ExitCount, getOne(ExitCount->getType()));
5223 // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
5224 // to factor simple cases.
5225 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
5226 TCMul = Mul->getOperand(0);
5227
5228 const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
5229 if (!MulC)
5230 return 1;
5231
5232 ConstantInt *Result = MulC->getValue();
5233
5234 // Guard against huge trip counts (this requires checking
5235 // for zero to handle the case where the trip count == -1 and the
5236 // addition wraps).
5237 if (!Result || Result->getValue().getActiveBits() > 32 ||
5238 Result->getValue().getActiveBits() == 0)
5239 return 1;
5240
5241 return (unsigned)Result->getZExtValue();
5242 }
5243
5244 // getExitCount - Get the expression for the number of loop iterations for which
5245 // this loop is guaranteed not to exit via ExitingBlock. Otherwise return
5246 // SCEVCouldNotCompute.
5247 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
5248 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
5249 }
5250
5251 const SCEV *
5252 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
5253 SCEVUnionPredicate &Preds) {
5254 return getPredicatedBackedgeTakenInfo(L).getExact(this, &Preds);
5255 }
5256
5257 /// getBackedgeTakenCount - If the specified loop has a predictable
5258 /// backedge-taken count, return it; otherwise return a SCEVCouldNotCompute
5259 /// object. The backedge-taken count is the number of times the loop header
5260 /// will be branched to from within the loop. This is one less than the
5261 /// trip count of the loop, since it doesn't count the first iteration,
5262 /// when the header is branched to from outside the loop.
5263 ///
5264 /// Note that it is not valid to call this method on a loop without a
5265 /// loop-invariant backedge-taken count (see
5266 /// hasLoopInvariantBackedgeTakenCount).
5267 ///
5268 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
5269 return getBackedgeTakenInfo(L).getExact(this);
5270 }
5271
5272 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
5273 /// return the least SCEV value that is known never to be less than the
5274 /// actual backedge taken count.
5275 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
5276 return getBackedgeTakenInfo(L).getMax(this);
5277 }
5278
5279 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
5280 /// onto the given Worklist.
5281 static void
5282 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
5283 BasicBlock *Header = L->getHeader();
5284
5285 // Push all Loop-header PHIs onto the Worklist stack.
5286 for (BasicBlock::iterator I = Header->begin();
5287 PHINode *PN = dyn_cast<PHINode>(I); ++I)
5288 Worklist.push_back(PN);
5289 }
5290
5291 const ScalarEvolution::BackedgeTakenInfo &
5292 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
5293 auto &BTI = getBackedgeTakenInfo(L);
5294 if (BTI.hasFullInfo())
5295 return BTI;
5296
5297 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
5298
5299 if (!Pair.second)
5300 return Pair.first->second;
5301
5302 BackedgeTakenInfo Result =
5303 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
5304
5305 return PredicatedBackedgeTakenCounts.find(L)->second = Result;
5306 }
5307
5308 const ScalarEvolution::BackedgeTakenInfo &
5309 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
5310 // Initially insert an invalid entry for this loop. If the insertion
5311 // succeeds, proceed to actually compute a backedge-taken count and
5312 // update the value. The temporary CouldNotCompute value tells SCEV
5313 // code elsewhere that it shouldn't attempt to request a new
5314 // backedge-taken count, which could result in infinite recursion.
5315 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
5316 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
5317 if (!Pair.second)
5318 return Pair.first->second;
5319
5320 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
5321 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
5322 // must be cleared in this scope.
5323 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
5324
5325 if (Result.getExact(this) != getCouldNotCompute()) {
5326 assert(isLoopInvariant(Result.getExact(this), L) &&
5327 isLoopInvariant(Result.getMax(this), L) &&
5328 "Computed backedge-taken count isn't loop invariant for loop!");
5329 ++NumTripCountsComputed;
5330 }
5331 else if (Result.getMax(this) == getCouldNotCompute() &&
5332 isa<PHINode>(L->getHeader()->begin())) {
5333 // Only count loops that have phi nodes as not being computable.
5334 ++NumTripCountsNotComputed;
5335 }
5336
5337 // Now that we know more about the trip count for this loop, forget any
5338 // existing SCEV values for PHI nodes in this loop since they are only
5339 // conservative estimates made without the benefit of trip count
5340 // information. This is similar to the code in forgetLoop, except that
5341 // it handles SCEVUnknown PHI nodes specially.
5342 if (Result.hasAnyInfo()) {
5343 SmallVector<Instruction *, 16> Worklist;
5344 PushLoopPHIs(L, Worklist);
5345
5346 SmallPtrSet<Instruction *, 8> Visited;
5347 while (!Worklist.empty()) {
5348 Instruction *I = Worklist.pop_back_val();
5349 if (!Visited.insert(I).second)
5350 continue;
5351
5352 ValueExprMapType::iterator It =
5353 ValueExprMap.find_as(static_cast<Value *>(I));
5354 if (It != ValueExprMap.end()) {
5355 const SCEV *Old = It->second;
5356
5357 // SCEVUnknown for a PHI either means that it has an unrecognized
5358 // structure, or it's a PHI that's in the process of being computed
5359 // by createNodeForPHI. In the former case, additional loop trip
5360 // count information isn't going to change anything. In the latter
5361 // case, createNodeForPHI will perform the necessary updates on its
5362 // own when it gets to that point.
5363 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
5364 forgetMemoizedResults(Old);
5365 ValueExprMap.erase(It);
5366 }
5367 if (PHINode *PN = dyn_cast<PHINode>(I))
5368 ConstantEvolutionLoopExitValue.erase(PN);
5369 }
5370
5371 PushDefUseChildren(I, Worklist);
5372 }
5373 }
5374
5375 // Re-lookup the insert position, since the call to
5376 // computeBackedgeTakenCount above could result in a
5377 // recursive call to getBackedgeTakenInfo (on a different
5378 // loop), which would invalidate the iterator computed
5379 // earlier.
5380 return BackedgeTakenCounts.find(L)->second = Result;
5381 }
5382
5383 /// forgetLoop - This method should be called by the client when it has
5384 /// changed a loop in a way that may affect ScalarEvolution's ability to
5385 /// compute a trip count, or if the loop is deleted.
5386 void ScalarEvolution::forgetLoop(const Loop *L) {
5387 // Drop any stored trip count value.
5388 auto RemoveLoopFromBackedgeMap =
5389 [L](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
5390 auto BTCPos = Map.find(L);
5391 if (BTCPos != Map.end()) {
5392 BTCPos->second.clear();
5393 Map.erase(BTCPos);
5394 }
5395 };
5396
5397 RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
5398 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);
5399
5400 // Drop information about expressions based on loop-header PHIs.
5401 SmallVector<Instruction *, 16> Worklist;
5402 PushLoopPHIs(L, Worklist);
5403
5404 SmallPtrSet<Instruction *, 8> Visited;
5405 while (!Worklist.empty()) {
5406 Instruction *I = Worklist.pop_back_val();
5407 if (!Visited.insert(I).second)
5408 continue;
5409
5410 ValueExprMapType::iterator It =
5411 ValueExprMap.find_as(static_cast<Value *>(I));
5412 if (It != ValueExprMap.end()) {
5413 forgetMemoizedResults(It->second);
5414 ValueExprMap.erase(It);
5415 if (PHINode *PN = dyn_cast<PHINode>(I))
5416 ConstantEvolutionLoopExitValue.erase(PN);
5417 }
5418
5419 PushDefUseChildren(I, Worklist);
5420 }
5421
5422 // Forget all contained loops too, to avoid dangling entries in the
5423 // ValuesAtScopes map.
5424 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
5425 forgetLoop(*I);
5426 }
5427
5428 /// forgetValue - This method should be called by the client when it has
5429 /// changed a value in a way that may affect its value, or which may
5430 /// disconnect it from a def-use chain linking it to a loop.
5431 void ScalarEvolution::forgetValue(Value *V) {
5432 Instruction *I = dyn_cast<Instruction>(V);
5433 if (!I) return;
5434
5435 // Drop information about expressions based on loop-header PHIs.
5436 SmallVector<Instruction *, 16> Worklist;
5437 Worklist.push_back(I);
5438
5439 SmallPtrSet<Instruction *, 8> Visited;
5440 while (!Worklist.empty()) {
5441 I = Worklist.pop_back_val();
5442 if (!Visited.insert(I).second)
5443 continue;
5444
5445 ValueExprMapType::iterator It =
5446 ValueExprMap.find_as(static_cast<Value *>(I));
5447 if (It != ValueExprMap.end()) {
5448 forgetMemoizedResults(It->second);
5449 ValueExprMap.erase(It);
5450 if (PHINode *PN = dyn_cast<PHINode>(I))
5451 ConstantEvolutionLoopExitValue.erase(PN);
5452 }
5453
5454 PushDefUseChildren(I, Worklist);
5455 }
5456 }
5457
5458 /// getExact - Get the exact loop backedge taken count considering all loop
5459 /// exits. A computable result can only be returned for loops with a single
5460 /// exit. Returning the minimum taken count among all exits is incorrect
5461 /// because one of the loop's exit limits may have been skipped.
HowFarToZero 5462 /// assumes that the limit of each loop test is never skipped. This is a valid 5463 /// assumption as long as the loop exits via that test. For precise results, it 5464 /// is the caller's responsibility to specify the relevant loop exit using 5465 /// getExact(ExitingBlock, SE). 5466 const SCEV * 5467 ScalarEvolution::BackedgeTakenInfo::getExact( 5468 ScalarEvolution *SE, SCEVUnionPredicate *Preds) const { 5469 // If any exits were not computable, the loop is not computable. 5470 if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute(); 5471 5472 // We need exactly one computable exit. 5473 if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute(); 5474 assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info"); 5475 5476 const SCEV *BECount = nullptr; 5477 for (auto &ENT : ExitNotTaken) { 5478 assert(ENT.ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV"); 5479 5480 if (!BECount) 5481 BECount = ENT.ExactNotTaken; 5482 else if (BECount != ENT.ExactNotTaken) 5483 return SE->getCouldNotCompute(); 5484 if (Preds && ENT.getPred()) 5485 Preds->add(ENT.getPred()); 5486 5487 assert((Preds || ENT.hasAlwaysTruePred()) && 5488 "Predicate should be always true!"); 5489 } 5490 5491 assert(BECount && "Invalid not taken count for loop exit"); 5492 return BECount; 5493 } 5494 5495 /// getExact - Get the exact not taken count for this loop exit. 5496 const SCEV * 5497 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 5498 ScalarEvolution *SE) const { 5499 for (auto &ENT : ExitNotTaken) 5500 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePred()) 5501 return ENT.ExactNotTaken; 5502 5503 return SE->getCouldNotCompute(); 5504 } 5505 5506 /// getMax - Get the max backedge taken count for the loop. 5507 const SCEV * 5508 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 5509 for (auto &ENT : ExitNotTaken) 5510 if (!ENT.hasAlwaysTruePred()) 5511 return SE->getCouldNotCompute(); 5512 5513 return Max ? Max : SE->getCouldNotCompute(); 5514 } 5515 5516 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 5517 ScalarEvolution *SE) const { 5518 if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S)) 5519 return true; 5520 5521 if (!ExitNotTaken.ExitingBlock) 5522 return false; 5523 5524 for (auto &ENT : ExitNotTaken) 5525 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 5526 SE->hasOperand(ENT.ExactNotTaken, S)) 5527 return true; 5528 5529 return false; 5530 } 5531 5532 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 5533 /// computable exit into a persistent ExitNotTakenInfo array. 5534 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 5535 SmallVectorImpl<EdgeInfo> &ExitCounts, bool Complete, const SCEV *MaxCount) 5536 : Max(MaxCount) { 5537 5538 if (!Complete) 5539 ExitNotTaken.setIncomplete(); 5540 5541 unsigned NumExits = ExitCounts.size(); 5542 if (NumExits == 0) return; 5543 5544 ExitNotTaken.ExitingBlock = ExitCounts[0].ExitBlock; 5545 ExitNotTaken.ExactNotTaken = ExitCounts[0].Taken; 5546 5547 // Determine the number of ExitNotTakenExtras structures that we need. 
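// For example, with three computable exits where only the third carries a
// non-trivial predicate, ExtraInfoSize is 1 + 1 = 2: the mandatory first
// element backing ExitNotTaken plus one element for the predicated exit.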
5548 unsigned ExtraInfoSize = 0; 5549 if (NumExits > 1) 5550 ExtraInfoSize = 1 + std::count_if(std::next(ExitCounts.begin()), 5551 ExitCounts.end(), [](EdgeInfo &Entry) { 5552 return !Entry.Pred.isAlwaysTrue(); 5553 }); 5554 else if (!ExitCounts[0].Pred.isAlwaysTrue()) 5555 ExtraInfoSize = 1; 5556 5557 ExitNotTakenExtras *ENT = nullptr; 5558 5559 // Allocate the ExitNotTakenExtras structures and initialize the first 5560 // element (ExitNotTaken). 5561 if (ExtraInfoSize > 0) { 5562 ENT = new ExitNotTakenExtras[ExtraInfoSize]; 5563 ExitNotTaken.ExtraInfo = &ENT[0]; 5564 *ExitNotTaken.getPred() = std::move(ExitCounts[0].Pred); 5565 } 5566 5567 if (NumExits == 1) 5568 return; 5569 5570 auto &Exits = ExitNotTaken.ExtraInfo->Exits; 5571 5572 // Handle the rare case of multiple computable exits. 5573 for (unsigned i = 1, PredPos = 1; i < NumExits; ++i) { 5574 ExitNotTakenExtras *Ptr = nullptr; 5575 if (!ExitCounts[i].Pred.isAlwaysTrue()) { 5576 Ptr = &ENT[PredPos++]; 5577 Ptr->Pred = std::move(ExitCounts[i].Pred); 5578 } 5579 5580 Exits.emplace_back(ExitCounts[i].ExitBlock, ExitCounts[i].Taken, Ptr); 5581 } 5582 } 5583 5584 /// clear - Invalidate this result and free the ExitNotTakenInfo array. 5585 void ScalarEvolution::BackedgeTakenInfo::clear() { 5586 ExitNotTaken.ExitingBlock = nullptr; 5587 ExitNotTaken.ExactNotTaken = nullptr; 5588 delete[] ExitNotTaken.ExtraInfo; 5589 } 5590 5591 /// computeBackedgeTakenCount - Compute the number of times the backedge 5592 /// of the specified loop will execute. 5593 ScalarEvolution::BackedgeTakenInfo 5594 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 5595 bool AllowPredicates) { 5596 SmallVector<BasicBlock *, 8> ExitingBlocks; 5597 L->getExitingBlocks(ExitingBlocks); 5598 5599 SmallVector<EdgeInfo, 4> ExitCounts; 5600 bool CouldComputeBECount = true; 5601 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 5602 const SCEV *MustExitMaxBECount = nullptr; 5603 const SCEV *MayExitMaxBECount = nullptr; 5604 5605 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 5606 // and compute maxBECount. 5607 // Do a union of all the predicates here. 5608 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 5609 BasicBlock *ExitBB = ExitingBlocks[i]; 5610 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 5611 5612 assert((AllowPredicates || EL.Pred.isAlwaysTrue()) && 5613 "Predicated exit limit when predicates are not allowed!"); 5614 5615 // 1. For each exit that can be computed, add an entry to ExitCounts. 5616 // CouldComputeBECount is true only if all exits can be computed. 5617 if (EL.Exact == getCouldNotCompute()) 5618 // We couldn't compute an exact value for this exit, so 5619 // we won't be able to compute an exact value for the loop. 5620 CouldComputeBECount = false; 5621 else 5622 ExitCounts.emplace_back(EdgeInfo(ExitBB, EL.Exact, EL.Pred)); 5623 5624 // 2. Derive the loop's MaxBECount from each exit's max number of 5625 // non-exiting iterations. Partition the loop exits into two kinds: 5626 // LoopMustExits and LoopMayExits. 5627 // 5628 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 5629 // is a LoopMayExit. If any computable LoopMustExit is found, then 5630 // MaxBECount is the minimum EL.Max of computable LoopMustExits. Otherwise, 5631 // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is 5632 // considered greater than any computable EL.Max. 
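// For example, a must-exit with EL.Max = 100 and a may-exit with EL.Max = 10
// yield MaxBECount = 100: the may-exit is not guaranteed to be tested on
// every iteration, so its smaller bound cannot cap the whole loop.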
5633 if (EL.Max != getCouldNotCompute() && Latch && 5634 DT.dominates(ExitBB, Latch)) { 5635 if (!MustExitMaxBECount) 5636 MustExitMaxBECount = EL.Max; 5637 else { 5638 MustExitMaxBECount = 5639 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.Max); 5640 } 5641 } else if (MayExitMaxBECount != getCouldNotCompute()) { 5642 if (!MayExitMaxBECount || EL.Max == getCouldNotCompute()) 5643 MayExitMaxBECount = EL.Max; 5644 else { 5645 MayExitMaxBECount = 5646 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.Max); 5647 } 5648 } 5649 } 5650 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 5651 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 5652 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount); 5653 } 5654 5655 ScalarEvolution::ExitLimit 5656 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 5657 bool AllowPredicates) { 5658 5659 // Okay, we've chosen an exiting block. See what condition causes us to exit 5660 // at this block and remember the exit block and whether all other targets 5661 // lead to the loop header. 5662 bool MustExecuteLoopHeader = true; 5663 BasicBlock *Exit = nullptr; 5664 for (auto *SBB : successors(ExitingBlock)) 5665 if (!L->contains(SBB)) { 5666 if (Exit) // Multiple exit successors. 5667 return getCouldNotCompute(); 5668 Exit = SBB; 5669 } else if (SBB != L->getHeader()) { 5670 MustExecuteLoopHeader = false; 5671 } 5672 5673 // At this point, we know we have a conditional branch that determines whether 5674 // the loop is exited. However, we don't know if the branch is executed each 5675 // time through the loop. If not, then the execution count of the branch will 5676 // not be equal to the trip count of the loop. 5677 // 5678 // Currently we check for this by checking to see if the Exit branch goes to 5679 // the loop header. If so, we know it will always execute the same number of 5680 // times as the loop. We also handle the case where the exit block *is* the 5681 // loop header. This is common for un-rotated loops. 5682 // 5683 // If both of those tests fail, walk up the unique predecessor chain to the 5684 // header, stopping if there is an edge that doesn't exit the loop. If the 5685 // header is reached, the execution count of the branch will be equal to the 5686 // trip count of the loop. 5687 // 5688 // More extensive analysis could be done to handle more cases here. 5689 // 5690 if (!MustExecuteLoopHeader && ExitingBlock != L->getHeader()) { 5691 // The simple checks failed, try climbing the unique predecessor chain 5692 // up to the header. 5693 bool Ok = false; 5694 for (BasicBlock *BB = ExitingBlock; BB; ) { 5695 BasicBlock *Pred = BB->getUniquePredecessor(); 5696 if (!Pred) 5697 return getCouldNotCompute(); 5698 TerminatorInst *PredTerm = Pred->getTerminator(); 5699 for (const BasicBlock *PredSucc : PredTerm->successors()) { 5700 if (PredSucc == BB) 5701 continue; 5702 // If the predecessor has a successor that isn't BB and isn't 5703 // outside the loop, assume the worst. 
5704 if (L->contains(PredSucc)) 5705 return getCouldNotCompute(); 5706 } 5707 if (Pred == L->getHeader()) { 5708 Ok = true; 5709 break; 5710 } 5711 BB = Pred; 5712 } 5713 if (!Ok) 5714 return getCouldNotCompute(); 5715 } 5716 5717 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 5718 TerminatorInst *Term = ExitingBlock->getTerminator(); 5719 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 5720 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 5721 // Proceed to the next level to examine the exit condition expression. 5722 return computeExitLimitFromCond( 5723 L, BI->getCondition(), BI->getSuccessor(0), BI->getSuccessor(1), 5724 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 5725 } 5726 5727 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) 5728 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 5729 /*ControlsExit=*/IsOnlyExit); 5730 5731 return getCouldNotCompute(); 5732 } 5733 5734 /// computeExitLimitFromCond - Compute the number of times the 5735 /// backedge of the specified loop will execute if its exit condition 5736 /// were a conditional branch of ExitCond, TBB, and FBB. 5737 /// 5738 /// @param ControlsExit is true if ExitCond directly controls the exit 5739 /// branch. In this case, we can assume that the loop exits only if the 5740 /// condition is true and can infer that failing to meet the condition prior to 5741 /// integer wraparound results in undefined behavior. 5742 ScalarEvolution::ExitLimit 5743 ScalarEvolution::computeExitLimitFromCond(const Loop *L, 5744 Value *ExitCond, 5745 BasicBlock *TBB, 5746 BasicBlock *FBB, 5747 bool ControlsExit, 5748 bool AllowPredicates) { 5749 // Check if the controlling expression for this loop is an And or Or. 5750 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 5751 if (BO->getOpcode() == Instruction::And) { 5752 // Recurse on the operands of the and. 5753 bool EitherMayExit = L->contains(TBB); 5754 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5755 ControlsExit && !EitherMayExit, 5756 AllowPredicates); 5757 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5758 ControlsExit && !EitherMayExit, 5759 AllowPredicates); 5760 const SCEV *BECount = getCouldNotCompute(); 5761 const SCEV *MaxBECount = getCouldNotCompute(); 5762 if (EitherMayExit) { 5763 // Both conditions must be true for the loop to continue executing. 5764 // Choose the less conservative count. 5765 if (EL0.Exact == getCouldNotCompute() || 5766 EL1.Exact == getCouldNotCompute()) 5767 BECount = getCouldNotCompute(); 5768 else 5769 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 5770 if (EL0.Max == getCouldNotCompute()) 5771 MaxBECount = EL1.Max; 5772 else if (EL1.Max == getCouldNotCompute()) 5773 MaxBECount = EL0.Max; 5774 else 5775 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 5776 } else { 5777 // Both conditions must be true at the same time for the loop to exit. 5778 // For now, be conservative. 5779 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 5780 if (EL0.Max == EL1.Max) 5781 MaxBECount = EL0.Max; 5782 if (EL0.Exact == EL1.Exact) 5783 BECount = EL0.Exact; 5784 } 5785 5786 SCEVUnionPredicate NP; 5787 NP.add(&EL0.Pred); 5788 NP.add(&EL1.Pred); 5789 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 5790 // to be more aggressive when computing BECount than when computing 5791 // MaxBECount. In these cases it is possible for EL0.Exact and EL1.Exact 5792 // to match, but for EL0.Max and EL1.Max to not. 
5793 if (isa<SCEVCouldNotCompute>(MaxBECount) && 5794 !isa<SCEVCouldNotCompute>(BECount)) 5795 MaxBECount = BECount; 5796 5797 return ExitLimit(BECount, MaxBECount, NP); 5798 } 5799 if (BO->getOpcode() == Instruction::Or) { 5800 // Recurse on the operands of the or. 5801 bool EitherMayExit = L->contains(FBB); 5802 ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB, 5803 ControlsExit && !EitherMayExit, 5804 AllowPredicates); 5805 ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB, 5806 ControlsExit && !EitherMayExit, 5807 AllowPredicates); 5808 const SCEV *BECount = getCouldNotCompute(); 5809 const SCEV *MaxBECount = getCouldNotCompute(); 5810 if (EitherMayExit) { 5811 // Both conditions must be false for the loop to continue executing. 5812 // Choose the less conservative count. 5813 if (EL0.Exact == getCouldNotCompute() || 5814 EL1.Exact == getCouldNotCompute()) 5815 BECount = getCouldNotCompute(); 5816 else 5817 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact); 5818 if (EL0.Max == getCouldNotCompute()) 5819 MaxBECount = EL1.Max; 5820 else if (EL1.Max == getCouldNotCompute()) 5821 MaxBECount = EL0.Max; 5822 else 5823 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max); 5824 } else { 5825 // Both conditions must be false at the same time for the loop to exit. 5826 // For now, be conservative. 5827 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 5828 if (EL0.Max == EL1.Max) 5829 MaxBECount = EL0.Max; 5830 if (EL0.Exact == EL1.Exact) 5831 BECount = EL0.Exact; 5832 } 5833 5834 SCEVUnionPredicate NP; 5835 NP.add(&EL0.Pred); 5836 NP.add(&EL1.Pred); 5837 return ExitLimit(BECount, MaxBECount, NP); 5838 } 5839 } 5840 5841 // With an icmp, it may be feasible to compute an exact backedge-taken count. 5842 // Proceed to the next level to examine the icmp. 5843 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 5844 ExitLimit EL = 5845 computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); 5846 if (EL.hasFullInfo() || !AllowPredicates) 5847 return EL; 5848 5849 // Try again, but use SCEV predicates this time. 5850 return computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit, 5851 /*AllowPredicates=*/true); 5852 } 5853 5854 // Check for a constant condition. These are normally stripped out by 5855 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 5856 // preserve the CFG and is temporarily leaving constant conditions 5857 // in place. 5858 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 5859 if (L->contains(FBB) == !CI->getZExtValue()) 5860 // The backedge is always taken. 5861 return getCouldNotCompute(); 5862 else 5863 // The backedge is never taken. 5864 return getZero(CI->getType()); 5865 } 5866 5867 // If it's not an integer or pointer comparison then compute it the hard way. 
5868 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 5869 } 5870 5871 ScalarEvolution::ExitLimit 5872 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 5873 ICmpInst *ExitCond, 5874 BasicBlock *TBB, 5875 BasicBlock *FBB, 5876 bool ControlsExit, 5877 bool AllowPredicates) { 5878 5879 // If the condition was exit on true, convert the condition to exit on false 5880 ICmpInst::Predicate Cond; 5881 if (!L->contains(FBB)) 5882 Cond = ExitCond->getPredicate(); 5883 else 5884 Cond = ExitCond->getInversePredicate(); 5885 5886 // Handle common loops like: for (X = "string"; *X; ++X) 5887 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 5888 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 5889 ExitLimit ItCnt = 5890 computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); 5891 if (ItCnt.hasAnyInfo()) 5892 return ItCnt; 5893 } 5894 5895 ExitLimit ShiftEL = computeShiftCompareExitLimit( 5896 ExitCond->getOperand(0), ExitCond->getOperand(1), L, Cond); 5897 if (ShiftEL.hasAnyInfo()) 5898 return ShiftEL; 5899 5900 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 5901 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 5902 5903 // Try to evaluate any dependencies out of the loop. 5904 LHS = getSCEVAtScope(LHS, L); 5905 RHS = getSCEVAtScope(RHS, L); 5906 5907 // At this point, we would like to compute how many iterations of the 5908 // loop the predicate will return true for these inputs. 5909 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 5910 // If there is a loop-invariant, force it into the RHS. 5911 std::swap(LHS, RHS); 5912 Cond = ICmpInst::getSwappedPredicate(Cond); 5913 } 5914 5915 // Simplify the operands before analyzing them. 5916 (void)SimplifyICmpOperands(Cond, LHS, RHS); 5917 5918 // If we have a comparison of a chrec against a constant, try to use value 5919 // ranges to answer this query. 5920 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 5921 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 5922 if (AddRec->getLoop() == L) { 5923 // Form the constant range. 
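// For example, if the loop continues while (X <u 100), Cond is ICMP_ULT and
// CompRange is [0, 100), the set of values for which the backedge is taken;
// getNumIterationsInRange then counts how long the add recurrence stays in
// that range.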
5924 ConstantRange CompRange( 5925 ICmpInst::makeConstantRange(Cond, RHSC->getAPInt())); 5926 5927 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 5928 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 5929 } 5930 5931 switch (Cond) { 5932 case ICmpInst::ICMP_NE: { // while (X != Y) 5933 // Convert to: while (X-Y != 0) 5934 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 5935 AllowPredicates); 5936 if (EL.hasAnyInfo()) return EL; 5937 break; 5938 } 5939 case ICmpInst::ICMP_EQ: { // while (X == Y) 5940 // Convert to: while (X-Y == 0) 5941 ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 5942 if (EL.hasAnyInfo()) return EL; 5943 break; 5944 } 5945 case ICmpInst::ICMP_SLT: 5946 case ICmpInst::ICMP_ULT: { // while (X < Y) 5947 bool IsSigned = Cond == ICmpInst::ICMP_SLT; 5948 ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 5949 AllowPredicates); 5950 if (EL.hasAnyInfo()) return EL; 5951 break; 5952 } 5953 case ICmpInst::ICMP_SGT: 5954 case ICmpInst::ICMP_UGT: { // while (X > Y) 5955 bool IsSigned = Cond == ICmpInst::ICMP_SGT; 5956 ExitLimit EL = 5957 HowManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 5958 AllowPredicates); 5959 if (EL.hasAnyInfo()) return EL; 5960 break; 5961 } 5962 default: 5963 break; 5964 } 5965 return computeExitCountExhaustively(L, ExitCond, !L->contains(TBB)); 5966 } 5967 5968 ScalarEvolution::ExitLimit 5969 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 5970 SwitchInst *Switch, 5971 BasicBlock *ExitingBlock, 5972 bool ControlsExit) { 5973 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 5974 5975 // Give up if the exit is the default dest of a switch. 5976 if (Switch->getDefaultDest() == ExitingBlock) 5977 return getCouldNotCompute(); 5978 5979 assert(L->contains(Switch->getDefaultDest()) && 5980 "Default case must not exit the loop!"); 5981 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 5982 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 5983 5984 // while (X != Y) --> while (X-Y != 0) 5985 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 5986 if (EL.hasAnyInfo()) 5987 return EL; 5988 5989 return getCouldNotCompute(); 5990 } 5991 5992 static ConstantInt * 5993 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 5994 ScalarEvolution &SE) { 5995 const SCEV *InVal = SE.getConstant(C); 5996 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 5997 assert(isa<SCEVConstant>(Val) && 5998 "Evaluation of SCEV at constant didn't fold correctly?"); 5999 return cast<SCEVConstant>(Val)->getValue(); 6000 } 6001 6002 /// computeLoadConstantCompareExitLimit - Given an exit condition of 6003 /// 'icmp op load X, cst', try to see if we can compute the backedge 6004 /// execution count. 6005 ScalarEvolution::ExitLimit 6006 ScalarEvolution::computeLoadConstantCompareExitLimit( 6007 LoadInst *LI, 6008 Constant *RHS, 6009 const Loop *L, 6010 ICmpInst::Predicate predicate) { 6011 6012 if (LI->isVolatile()) return getCouldNotCompute(); 6013 6014 // Check to see if the loaded pointer is a getelementptr of a global. 6015 // TODO: Use SCEV instead of manually grubbing with GEPs. 6016 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 6017 if (!GEP) return getCouldNotCompute(); 6018 6019 // Make sure that it is really a constant global we are gepping, with an 6020 // initializer, and make sure the first IDX is really 0. 
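// That is, the load must look like (load (gep @GV, 0, ..., X, ...)) where @GV
// is a constant global with an initializer and X is the sole non-constant
// index.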
6021 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 6022 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 6023 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 6024 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 6025 return getCouldNotCompute(); 6026 6027 // Okay, we allow one non-constant index into the GEP instruction. 6028 Value *VarIdx = nullptr; 6029 std::vector<Constant*> Indexes; 6030 unsigned VarIdxNum = 0; 6031 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 6032 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 6033 Indexes.push_back(CI); 6034 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 6035 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 6036 VarIdx = GEP->getOperand(i); 6037 VarIdxNum = i-2; 6038 Indexes.push_back(nullptr); 6039 } 6040 6041 // Loop-invariant loads may be a byproduct of loop optimization. Skip them. 6042 if (!VarIdx) 6043 return getCouldNotCompute(); 6044 6045 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 6046 // Check to see if X is a loop variant variable value now. 6047 const SCEV *Idx = getSCEV(VarIdx); 6048 Idx = getSCEVAtScope(Idx, L); 6049 6050 // We can only recognize very limited forms of loop index expressions, in 6051 // particular, only affine AddRec's like {C1,+,C2}. 6052 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 6053 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || 6054 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 6055 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 6056 return getCouldNotCompute(); 6057 6058 unsigned MaxSteps = MaxBruteForceIterations; 6059 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 6060 ConstantInt *ItCst = ConstantInt::get( 6061 cast<IntegerType>(IdxExpr->getType()), IterationNum); 6062 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 6063 6064 // Form the GEP offset. 6065 Indexes[VarIdxNum] = Val; 6066 6067 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(), 6068 Indexes); 6069 if (!Result) break; // Cannot compute! 6070 6071 // Evaluate the condition for this iteration. 6072 Result = ConstantExpr::getICmp(predicate, Result, RHS); 6073 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 6074 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 6075 ++NumArrayLenItCounts; 6076 return getConstant(ItCst); // Found terminating iteration! 6077 } 6078 } 6079 return getCouldNotCompute(); 6080 } 6081 6082 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 6083 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 6084 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 6085 if (!RHS) 6086 return getCouldNotCompute(); 6087 6088 const BasicBlock *Latch = L->getLoopLatch(); 6089 if (!Latch) 6090 return getCouldNotCompute(); 6091 6092 const BasicBlock *Predecessor = L->getLoopPredecessor(); 6093 if (!Predecessor) 6094 return getCouldNotCompute(); 6095 6096 // Return true if V is of the form "LHS `shift_op` <positive constant>". 6097 // Return LHS in OutLHS and shift_op in OutOpCode.
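// For example, "%v = lshr i32 %x, 3" matches with OutLHS = %x and
// OutOpCode = Instruction::LShr; shifts by zero or by a non-constant
// amount do not match.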
6098 auto MatchPositiveShift = 6099 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 6100 6101 using namespace PatternMatch; 6102 6103 ConstantInt *ShiftAmt; 6104 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6105 OutOpCode = Instruction::LShr; 6106 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6107 OutOpCode = Instruction::AShr; 6108 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 6109 OutOpCode = Instruction::Shl; 6110 else 6111 return false; 6112 6113 return ShiftAmt->getValue().isStrictlyPositive(); 6114 }; 6115 6116 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 6117 // 6118 // loop: 6119 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 6120 // %iv.shifted = lshr i32 %iv, <positive constant> 6121 // 6122 // Return true on a successful match. Return the corresponding PHI node (%iv 6123 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 6124 auto MatchShiftRecurrence = 6125 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 6126 Optional<Instruction::BinaryOps> PostShiftOpCode; 6127 6128 { 6129 Instruction::BinaryOps OpC; 6130 Value *V; 6131 6132 // If we encounter a shift instruction, "peel off" the shift operation, 6133 // and remember that we did so. Later when we inspect %iv's backedge 6134 // value, we will make sure that the backedge value uses the same 6135 // operation. 6136 // 6137 // Note: the peeled shift operation does not have to be the same 6138 // instruction as the one feeding into the PHI's backedge value. We only 6139 // really care about it being the same *kind* of shift instruction -- 6140 // that's all that is required for our later inferences to hold. 6141 if (MatchPositiveShift(LHS, V, OpC)) { 6142 PostShiftOpCode = OpC; 6143 LHS = V; 6144 } 6145 } 6146 6147 PNOut = dyn_cast<PHINode>(LHS); 6148 if (!PNOut || PNOut->getParent() != L->getHeader()) 6149 return false; 6150 6151 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 6152 Value *OpLHS; 6153 6154 return 6155 // The backedge value for the PHI node must be a shift by a positive 6156 // amount 6157 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 6158 6159 // of the PHI node itself 6160 OpLHS == PNOut && 6161 6162 // and the kind of shift should match the kind of shift we peeled 6163 // off, if any. 6164 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 6165 }; 6166 6167 PHINode *PN; 6168 Instruction::BinaryOps OpCode; 6169 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 6170 return getCouldNotCompute(); 6171 6172 const DataLayout &DL = getDataLayout(); 6173 6174 // The key rationale for this optimization is that for some kinds of shift 6175 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 6176 // within a finite number of iterations. If the condition guarding the 6177 // backedge (in the sense that the backedge is taken if the condition is true) 6178 // is false for the value the shift recurrence stabilizes to, then we know 6179 // that the backedge is taken only a finite number of times. 6180 6181 ConstantInt *StableValue = nullptr; 6182 switch (OpCode) { 6183 default: 6184 llvm_unreachable("Impossible case!"); 6185 6186 case Instruction::AShr: { 6187 // {K,ashr,<positive-constant>} stabilizes to 0 for non-negative K and to 6188 // -1 for negative K, in at most bitwidth(K) iterations.
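// For example, in i8, {-20,ashr,1} evolves as -20, -10, -5, -3, -2, -1,
// -1, ... while {20,ashr,1} evolves as 20, 10, 5, 2, 1, 0, 0, ...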
6189 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 6190 bool KnownZero, KnownOne; 6191 ComputeSignBit(FirstValue, KnownZero, KnownOne, DL, 0, nullptr, 6192 Predecessor->getTerminator(), &DT); 6193 auto *Ty = cast<IntegerType>(RHS->getType()); 6194 if (KnownZero) 6195 StableValue = ConstantInt::get(Ty, 0); 6196 else if (KnownOne) 6197 StableValue = ConstantInt::get(Ty, -1, true); 6198 else 6199 return getCouldNotCompute(); 6200 6201 break; 6202 } 6203 case Instruction::LShr: 6204 case Instruction::Shl: 6205 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 6206 // stabilize to 0 in at most bitwidth(K) iterations. 6207 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 6208 break; 6209 } 6210 6211 auto *Result = 6212 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 6213 assert(Result->getType()->isIntegerTy(1) && 6214 "Otherwise cannot be an operand to a branch instruction"); 6215 6216 if (Result->isZeroValue()) { 6217 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 6218 const SCEV *UpperBound = 6219 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 6220 SCEVUnionPredicate P; 6221 return ExitLimit(getCouldNotCompute(), UpperBound, P); 6222 } 6223 6224 return getCouldNotCompute(); 6225 } 6226 6227 /// CanConstantFold - Return true if we can constant fold an instruction of the 6228 /// specified type, assuming that all operands were constants. 6229 static bool CanConstantFold(const Instruction *I) { 6230 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 6231 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 6232 isa<LoadInst>(I)) 6233 return true; 6234 6235 if (const CallInst *CI = dyn_cast<CallInst>(I)) 6236 if (const Function *F = CI->getCalledFunction()) 6237 return canConstantFoldCallTo(F); 6238 return false; 6239 } 6240 6241 /// Determine whether this instruction can constant evolve within this loop 6242 /// assuming its operands can all constant evolve. 6243 static bool canConstantEvolve(Instruction *I, const Loop *L) { 6244 // An instruction outside of the loop can't be derived from a loop PHI. 6245 if (!L->contains(I)) return false; 6246 6247 if (isa<PHINode>(I)) { 6248 // We don't currently keep track of the control flow needed to evaluate 6249 // PHIs, so we cannot handle PHIs inside of loops. 6250 return L->getHeader() == I->getParent(); 6251 } 6252 6253 // If we won't be able to constant fold this expression even if the operands 6254 // are constants, bail early. 6255 return CanConstantFold(I); 6256 } 6257 6258 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 6259 /// recursing through each instruction operand until reaching a loop header phi. 6260 static PHINode * 6261 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 6262 DenseMap<Instruction *, PHINode *> &PHIMap) { 6263 6264 // Otherwise, we can evaluate this instruction if all of its operands are 6265 // constant or derived from a PHI node themselves. 6266 PHINode *PHI = nullptr; 6267 for (Value *Op : UseInst->operands()) { 6268 if (isa<Constant>(Op)) continue; 6269 6270 Instruction *OpInst = dyn_cast<Instruction>(Op); 6271 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 6272 6273 PHINode *P = dyn_cast<PHINode>(OpInst); 6274 if (!P) 6275 // If this operand is already visited, reuse the prior result. 6276 // We may have P != PHI if this is the deepest point at which the 6277 // inconsistent paths meet. 
6278 P = PHIMap.lookup(OpInst); 6279 if (!P) { 6280 // Recurse and memoize the results, whether a phi is found or not. 6281 // This recursive call invalidates pointers into PHIMap. 6282 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap); 6283 PHIMap[OpInst] = P; 6284 } 6285 if (!P) 6286 return nullptr; // Not evolving from PHI 6287 if (PHI && PHI != P) 6288 return nullptr; // Evolving from multiple different PHIs. 6289 PHI = P; 6290 } 6291 // This is an expression evolving from a constant PHI! 6292 return PHI; 6293 } 6294 6295 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 6296 /// in the loop that V is derived from. We allow arbitrary operations along the 6297 /// way, but the operands of an operation must either be constants or a value 6298 /// derived from a constant PHI. If this expression does not fit with these 6299 /// constraints, return null. 6300 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 6301 Instruction *I = dyn_cast<Instruction>(V); 6302 if (!I || !canConstantEvolve(I, L)) return nullptr; 6303 6304 if (PHINode *PN = dyn_cast<PHINode>(I)) 6305 return PN; 6306 6307 // Record non-constant instructions contained by the loop. 6308 DenseMap<Instruction *, PHINode *> PHIMap; 6309 return getConstantEvolvingPHIOperands(I, L, PHIMap); 6310 } 6311 6312 /// EvaluateExpression - Given an expression that passes the 6313 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI 6314 /// nodes in the loop have the constant values given in Vals. If we can't fold 6315 /// this expression for some reason, return null. 6316 static Constant *EvaluateExpression(Value *V, const Loop *L, 6317 DenseMap<Instruction *, Constant *> &Vals, 6318 const DataLayout &DL, 6319 const TargetLibraryInfo *TLI) { 6320 // Convenient constant check, but redundant for recursive calls. 6321 if (Constant *C = dyn_cast<Constant>(V)) return C; 6322 Instruction *I = dyn_cast<Instruction>(V); 6323 if (!I) return nullptr; 6324 6325 if (Constant *C = Vals.lookup(I)) return C; 6326 6327 // An instruction inside the loop depends on a value outside the loop that we 6328 // weren't given a mapping for, or a value such as a call inside the loop. 6329 if (!canConstantEvolve(I, L)) return nullptr; 6330 6331 // An unmapped PHI can be due to a branch or another loop inside this loop, 6332 // or due to this not being the initial iteration through a loop where we 6333 // couldn't compute the evolution of this particular PHI last time. 6334 if (isa<PHINode>(I)) return nullptr; 6335 6336 std::vector<Constant*> Operands(I->getNumOperands()); 6337 6338 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 6339 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 6340 if (!Operand) { 6341 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 6342 if (!Operands[i]) return nullptr; 6343 continue; 6344 } 6345 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 6346 Vals[Operand] = C; 6347 if (!C) return nullptr; 6348 Operands[i] = C; 6349 } 6350 6351 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 6352 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 6353 Operands[1], DL, TLI); 6354 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 6355 if (!LI->isVolatile()) 6356 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 6357 } 6358 return ConstantFoldInstOperands(I, Operands, DL, TLI); 6359 } 6360 6361 6362 // If every incoming value to PN except the one for BB is a specific Constant, 6363 // return that, else return nullptr.
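// For example, given
//
//   %pn = phi i32 [ 4, %preheader ], [ %x, %latch ]
//
// getOtherIncomingValue(%pn, %latch) returns the ConstantInt 4, which the
// brute-force evaluators below use as the starting value for %pn.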
6364 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 6365 Constant *IncomingVal = nullptr; 6366 6367 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 6368 if (PN->getIncomingBlock(i) == BB) 6369 continue; 6370 6371 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 6372 if (!CurrentVal) 6373 return nullptr; 6374 6375 if (IncomingVal != CurrentVal) { 6376 if (IncomingVal) 6377 return nullptr; 6378 IncomingVal = CurrentVal; 6379 } 6380 } 6381 6382 return IncomingVal; 6383 } 6384 6385 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 6386 /// in the header of its containing loop, that the loop executes a 6387 /// constant number of times, and that the PHI node is just a recurrence 6388 /// involving constants, then fold it. 6389 Constant * 6390 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 6391 const APInt &BEs, 6392 const Loop *L) { 6393 auto I = ConstantEvolutionLoopExitValue.find(PN); 6394 if (I != ConstantEvolutionLoopExitValue.end()) 6395 return I->second; 6396 6397 if (BEs.ugt(MaxBruteForceIterations)) 6398 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 6399 6400 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 6401 6402 DenseMap<Instruction *, Constant *> CurrentIterVals; 6403 BasicBlock *Header = L->getHeader(); 6404 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6405 6406 BasicBlock *Latch = L->getLoopLatch(); 6407 if (!Latch) 6408 return nullptr; 6409 6410 for (auto &I : *Header) { 6411 PHINode *PHI = dyn_cast<PHINode>(&I); 6412 if (!PHI) break; 6413 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6414 if (!StartCST) continue; 6415 CurrentIterVals[PHI] = StartCST; 6416 } 6417 if (!CurrentIterVals.count(PN)) 6418 return RetVal = nullptr; 6419 6420 Value *BEValue = PN->getIncomingValueForBlock(Latch); 6421 6422 // Execute the loop symbolically to determine the exit value. 6423 if (BEs.getActiveBits() >= 32) 6424 return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it! 6425 6426 unsigned NumIterations = BEs.getZExtValue(); // must be in range 6427 unsigned IterationNum = 0; 6428 const DataLayout &DL = getDataLayout(); 6429 for (; ; ++IterationNum) { 6430 if (IterationNum == NumIterations) 6431 return RetVal = CurrentIterVals[PN]; // Got exit value! 6432 6433 // Compute the value of the PHIs for the next iteration. 6434 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 6435 DenseMap<Instruction *, Constant *> NextIterVals; 6436 Constant *NextPHI = 6437 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6438 if (!NextPHI) 6439 return nullptr; // Couldn't evaluate! 6440 NextIterVals[PN] = NextPHI; 6441 6442 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 6443 6444 // Also evaluate the other PHI nodes. However, we don't get to stop if we 6445 // cease to be able to evaluate one of them or if they stop evolving, 6446 // because that doesn't necessarily prevent us from computing PN. 6447 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 6448 for (const auto &I : CurrentIterVals) { 6449 PHINode *PHI = dyn_cast<PHINode>(I.first); 6450 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 6451 PHIsToCompute.emplace_back(PHI, I.second); 6452 } 6453 // We use two distinct loops because EvaluateExpression may invalidate any 6454 // iterators into CurrentIterVals.
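// (EvaluateExpression adds entries to CurrentIterVals as it folds
// operands, and inserting into a DenseMap can rehash and thereby
// invalidate iterators into it.)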
6455 for (const auto &I : PHIsToCompute) { 6456 PHINode *PHI = I.first; 6457 Constant *&NextPHI = NextIterVals[PHI]; 6458 if (!NextPHI) { // Not already computed. 6459 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 6460 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6461 } 6462 if (NextPHI != I.second) 6463 StoppedEvolving = false; 6464 } 6465 6466 // If all entries in CurrentIterVals == NextIterVals then we can stop 6467 // iterating, the loop can't continue to change. 6468 if (StoppedEvolving) 6469 return RetVal = CurrentIterVals[PN]; 6470 6471 CurrentIterVals.swap(NextIterVals); 6472 } 6473 } 6474 6475 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 6476 Value *Cond, 6477 bool ExitWhen) { 6478 PHINode *PN = getConstantEvolvingPHI(Cond, L); 6479 if (!PN) return getCouldNotCompute(); 6480 6481 // If the loop is canonicalized, the PHI will have exactly two entries. 6482 // That's the only form we support here. 6483 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 6484 6485 DenseMap<Instruction *, Constant *> CurrentIterVals; 6486 BasicBlock *Header = L->getHeader(); 6487 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 6488 6489 BasicBlock *Latch = L->getLoopLatch(); 6490 assert(Latch && "Should follow from NumIncomingValues == 2!"); 6491 6492 for (auto &I : *Header) { 6493 PHINode *PHI = dyn_cast<PHINode>(&I); 6494 if (!PHI) 6495 break; 6496 auto *StartCST = getOtherIncomingValue(PHI, Latch); 6497 if (!StartCST) continue; 6498 CurrentIterVals[PHI] = StartCST; 6499 } 6500 if (!CurrentIterVals.count(PN)) 6501 return getCouldNotCompute(); 6502 6503 // Okay, we find a PHI node that defines the trip count of this loop. Execute 6504 // the loop symbolically to determine when the condition gets a value of 6505 // "ExitWhen". 6506 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 6507 const DataLayout &DL = getDataLayout(); 6508 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 6509 auto *CondVal = dyn_cast_or_null<ConstantInt>( 6510 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 6511 6512 // Couldn't symbolically evaluate. 6513 if (!CondVal) return getCouldNotCompute(); 6514 6515 if (CondVal->getValue() == uint64_t(ExitWhen)) { 6516 ++NumBruteForceTripCountsComputed; 6517 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 6518 } 6519 6520 // Update all the PHI nodes for the next iteration. 6521 DenseMap<Instruction *, Constant *> NextIterVals; 6522 6523 // Create a list of which PHIs we need to compute. We want to do this before 6524 // calling EvaluateExpression on them because that may invalidate iterators 6525 // into CurrentIterVals. 6526 SmallVector<PHINode *, 8> PHIsToCompute; 6527 for (const auto &I : CurrentIterVals) { 6528 PHINode *PHI = dyn_cast<PHINode>(I.first); 6529 if (!PHI || PHI->getParent() != Header) continue; 6530 PHIsToCompute.push_back(PHI); 6531 } 6532 for (PHINode *PHI : PHIsToCompute) { 6533 Constant *&NextPHI = NextIterVals[PHI]; 6534 if (NextPHI) continue; // Already computed! 6535 6536 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 6537 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 6538 } 6539 CurrentIterVals.swap(NextIterVals); 6540 } 6541 6542 // Too many iterations were needed to evaluate. 6543 return getCouldNotCompute(); 6544 } 6545 6546 /// getSCEVAtScope - Return a SCEV expression for the specified value 6547 /// at the specified scope in the program. 
The L value specifies the loop 6548 /// nest in which to evaluate the expression: null means the top-level scope, 6549 /// and a non-null loop means the scope immediately inside that loop. 6550 /// 6551 /// This method can be used to compute the exit value for a variable defined 6552 /// in a loop by querying what the value will hold in the parent loop. 6553 /// 6554 /// In the case that a relevant loop exit value cannot be computed, the 6555 /// original value V is returned. 6556 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 6557 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 6558 ValuesAtScopes[V]; 6559 // Check to see if we've folded this expression at this loop before. 6560 for (auto &LS : Values) 6561 if (LS.first == L) 6562 return LS.second ? LS.second : V; 6563 6564 Values.emplace_back(L, nullptr); 6565 6566 // Otherwise compute it. 6567 const SCEV *C = computeSCEVAtScope(V, L); 6568 for (auto &LS : reverse(ValuesAtScopes[V])) 6569 if (LS.first == L) { 6570 LS.second = C; 6571 break; 6572 } 6573 return C; 6574 } 6575 6576 /// This builds up a Constant using the ConstantExpr interface. That way, we 6577 /// will return Constants for objects which aren't represented by a 6578 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 6579 /// Returns NULL if the SCEV isn't representable as a Constant. 6580 static Constant *BuildConstantFromSCEV(const SCEV *V) { 6581 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 6582 case scCouldNotCompute: 6583 case scAddRecExpr: 6584 break; 6585 case scConstant: 6586 return cast<SCEVConstant>(V)->getValue(); 6587 case scUnknown: 6588 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 6589 case scSignExtend: { 6590 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 6591 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 6592 return ConstantExpr::getSExt(CastOp, SS->getType()); 6593 break; 6594 } 6595 case scZeroExtend: { 6596 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 6597 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 6598 return ConstantExpr::getZExt(CastOp, SZ->getType()); 6599 break; 6600 } 6601 case scTruncate: { 6602 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 6603 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 6604 return ConstantExpr::getTrunc(CastOp, ST->getType()); 6605 break; 6606 } 6607 case scAddExpr: { 6608 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 6609 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 6610 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 6611 unsigned AS = PTy->getAddressSpace(); 6612 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 6613 C = ConstantExpr::getBitCast(C, DestPtrTy); 6614 } 6615 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 6616 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 6617 if (!C2) return nullptr; 6618 6619 // First pointer! 6620 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 6621 unsigned AS = C2->getType()->getPointerAddressSpace(); 6622 std::swap(C, C2); 6623 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 6624 // The offsets have been converted to bytes. We can add bytes to an 6625 // i8* by GEP with the byte count in the first index. 6626 C = ConstantExpr::getBitCast(C, DestPtrTy); 6627 } 6628 6629 // Don't bother trying to sum two pointers. We probably can't 6630 // statically compute a load that results from it anyway.
6631 if (C2->getType()->isPointerTy()) 6632 return nullptr; 6633 6634 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 6635 if (PTy->getElementType()->isStructTy()) 6636 C2 = ConstantExpr::getIntegerCast( 6637 C2, Type::getInt32Ty(C->getContext()), true); 6638 C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2); 6639 } else 6640 C = ConstantExpr::getAdd(C, C2); 6641 } 6642 return C; 6643 } 6644 break; 6645 } 6646 case scMulExpr: { 6647 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 6648 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 6649 // Don't bother with pointers at all. 6650 if (C->getType()->isPointerTy()) return nullptr; 6651 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 6652 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 6653 if (!C2 || C2->getType()->isPointerTy()) return nullptr; 6654 C = ConstantExpr::getMul(C, C2); 6655 } 6656 return C; 6657 } 6658 break; 6659 } 6660 case scUDivExpr: { 6661 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 6662 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 6663 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 6664 if (LHS->getType() == RHS->getType()) 6665 return ConstantExpr::getUDiv(LHS, RHS); 6666 break; 6667 } 6668 case scSMaxExpr: 6669 case scUMaxExpr: 6670 break; // TODO: smax, umax. 6671 } 6672 return nullptr; 6673 } 6674 6675 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 6676 if (isa<SCEVConstant>(V)) return V; 6677 6678 // If this instruction is evolved from a constant-evolving PHI, compute the 6679 // exit value from the loop without using SCEVs. 6680 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 6681 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 6682 const Loop *LI = this->LI[I->getParent()]; 6683 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 6684 if (PHINode *PN = dyn_cast<PHINode>(I)) 6685 if (PN->getParent() == LI->getHeader()) { 6686 // Okay, there is no closed form solution for the PHI node. Check 6687 // to see if the loop that contains it has a known backedge-taken 6688 // count. If so, we may be able to force computation of the exit 6689 // value. 6690 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 6691 if (const SCEVConstant *BTCC = 6692 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 6693 // Okay, we know how many times the containing loop executes. If 6694 // this is a constant evolving PHI node, get the final value at 6695 // the specified iteration number. 6696 Constant *RV = 6697 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI); 6698 if (RV) return getSCEV(RV); 6699 } 6700 } 6701 6702 // Okay, this is an expression that we cannot symbolically evaluate 6703 // into a SCEV. Check to see if it's possible to symbolically evaluate 6704 // the arguments into constants, and if so, try to constant propagate the 6705 // result. This is particularly useful for computing loop exit values. 6706 if (CanConstantFold(I)) { 6707 SmallVector<Constant *, 4> Operands; 6708 bool MadeImprovement = false; 6709 for (Value *Op : I->operands()) { 6710 if (Constant *C = dyn_cast<Constant>(Op)) { 6711 Operands.push_back(C); 6712 continue; 6713 } 6714 6715 // If any of the operands is non-constant and if they are 6716 // non-integer and non-pointer, don't even try to analyze them 6717 // with scev techniques. 
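// (isSCEVable accepts only integer and pointer types, so this rejects
// e.g. floating-point operands.)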
6718 if (!isSCEVable(Op->getType())) 6719 return V; 6720 6721 const SCEV *OrigV = getSCEV(Op); 6722 const SCEV *OpV = getSCEVAtScope(OrigV, L); 6723 MadeImprovement |= OrigV != OpV; 6724 6725 Constant *C = BuildConstantFromSCEV(OpV); 6726 if (!C) return V; 6727 if (C->getType() != Op->getType()) 6728 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 6729 Op->getType(), 6730 false), 6731 C, Op->getType()); 6732 Operands.push_back(C); 6733 } 6734 6735 // Check to see if getSCEVAtScope actually made an improvement. 6736 if (MadeImprovement) { 6737 Constant *C = nullptr; 6738 const DataLayout &DL = getDataLayout(); 6739 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 6740 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 6741 Operands[1], DL, &TLI); 6742 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 6743 if (!LI->isVolatile()) 6744 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 6745 } else 6746 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 6747 if (!C) return V; 6748 return getSCEV(C); 6749 } 6750 } 6751 } 6752 6753 // This is some other type of SCEVUnknown, just return it. 6754 return V; 6755 } 6756 6757 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 6758 // Avoid performing the look-up in the common case where the specified 6759 // expression has no loop-variant portions. 6760 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 6761 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 6762 if (OpAtScope != Comm->getOperand(i)) { 6763 // Okay, at least one of these operands is loop variant but might be 6764 // foldable. Build a new instance of the folded commutative expression. 6765 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 6766 Comm->op_begin()+i); 6767 NewOps.push_back(OpAtScope); 6768 6769 for (++i; i != e; ++i) { 6770 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 6771 NewOps.push_back(OpAtScope); 6772 } 6773 if (isa<SCEVAddExpr>(Comm)) 6774 return getAddExpr(NewOps); 6775 if (isa<SCEVMulExpr>(Comm)) 6776 return getMulExpr(NewOps); 6777 if (isa<SCEVSMaxExpr>(Comm)) 6778 return getSMaxExpr(NewOps); 6779 if (isa<SCEVUMaxExpr>(Comm)) 6780 return getUMaxExpr(NewOps); 6781 llvm_unreachable("Unknown commutative SCEV type!"); 6782 } 6783 } 6784 // If we got here, all operands are loop invariant. 6785 return Comm; 6786 } 6787 6788 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 6789 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 6790 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 6791 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 6792 return Div; // must be loop invariant 6793 return getUDivExpr(LHS, RHS); 6794 } 6795 6796 // If this is a loop recurrence for a loop that does not contain L, then we 6797 // are dealing with the final value computed by the loop. 6798 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 6799 // First, attempt to evaluate each operand. 6800 // Avoid performing the look-up in the common case where the specified 6801 // expression has no loop-variant portions. 6802 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 6803 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 6804 if (OpAtScope == AddRec->getOperand(i)) 6805 continue; 6806 6807 // Okay, at least one of these operands is loop variant but might be 6808 // foldable. Build a new instance of the folded commutative expression. 
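// The operands before position i were unchanged by evaluation at this
// scope, so they can be reused as-is; only the remaining operands still
// need to be evaluated.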
6809 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 6810 AddRec->op_begin()+i); 6811 NewOps.push_back(OpAtScope); 6812 for (++i; i != e; ++i) 6813 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 6814 6815 const SCEV *FoldedRec = 6816 getAddRecExpr(NewOps, AddRec->getLoop(), 6817 AddRec->getNoWrapFlags(SCEV::FlagNW)); 6818 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 6819 // The addrec may be folded to a nonrecurrence, for example, if the 6820 // induction variable is multiplied by zero after constant folding. Go 6821 // ahead and return the folded value. 6822 if (!AddRec) 6823 return FoldedRec; 6824 break; 6825 } 6826 6827 // If the scope is outside the addrec's loop, evaluate it by using the 6828 // loop exit value of the addrec. 6829 if (!AddRec->getLoop()->contains(L)) { 6830 // To evaluate this recurrence, we need to know how many times the AddRec 6831 // loop iterates. Compute this now. 6832 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 6833 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 6834 6835 // Then, evaluate the AddRec. 6836 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 6837 } 6838 6839 return AddRec; 6840 } 6841 6842 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 6843 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6844 if (Op == Cast->getOperand()) 6845 return Cast; // must be loop invariant 6846 return getZeroExtendExpr(Op, Cast->getType()); 6847 } 6848 6849 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 6850 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6851 if (Op == Cast->getOperand()) 6852 return Cast; // must be loop invariant 6853 return getSignExtendExpr(Op, Cast->getType()); 6854 } 6855 6856 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 6857 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 6858 if (Op == Cast->getOperand()) 6859 return Cast; // must be loop invariant 6860 return getTruncateExpr(Op, Cast->getType()); 6861 } 6862 6863 llvm_unreachable("Unknown SCEV type!"); 6864 } 6865 6866 /// getSCEVAtScope - This is a convenience function which does 6867 /// getSCEVAtScope(getSCEV(V), L). 6868 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 6869 return getSCEVAtScope(getSCEV(V), L); 6870 } 6871 6872 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 6873 /// following equation: 6874 /// 6875 /// A * X = B (mod N) 6876 /// 6877 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 6878 /// A and B isn't important. 6879 /// 6880 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 6881 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 6882 ScalarEvolution &SE) { 6883 uint32_t BW = A.getBitWidth(); 6884 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 6885 assert(A != 0 && "A must be non-zero."); 6886 6887 // 1. D = gcd(A, N) 6888 // 6889 // The gcd of A and N may have only one prime factor: 2. The number of 6890 // trailing zeros in A is its multiplicity 6891 uint32_t Mult2 = A.countTrailingZeros(); 6892 // D = 2^Mult2 6893 6894 // 2. Check if B is divisible by D. 6895 // 6896 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 6897 // is not less than multiplicity of this prime factor for D. 6898 if (B.countTrailingZeros() < Mult2) 6899 return SE.getCouldNotCompute(); 6900 6901 // 3. 
Compute I: the multiplicative inverse of (A / D) in arithmetic 6902 // modulo (N / D). 6903 // 6904 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 6905 // bit width during computations. 6906 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 6907 APInt Mod(BW + 1, 0); 6908 Mod.setBit(BW - Mult2); // Mod = N / D 6909 APInt I = AD.multiplicativeInverse(Mod); 6910 6911 // 4. Compute the minimum unsigned root of the equation: 6912 // I * (B / D) mod (N / D) 6913 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 6914 6915 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 6916 // bits. 6917 return SE.getConstant(Result.trunc(BW)); 6918 } 6919 6920 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 6921 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 6922 /// might be the same) or two SCEVCouldNotCompute objects. 6923 /// 6924 static std::pair<const SCEV *,const SCEV *> 6925 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 6926 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 6927 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 6928 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 6929 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 6930 6931 // We currently can only solve this if the coefficients are constants. 6932 if (!LC || !MC || !NC) { 6933 const SCEV *CNC = SE.getCouldNotCompute(); 6934 return {CNC, CNC}; 6935 } 6936 6937 uint32_t BitWidth = LC->getAPInt().getBitWidth(); 6938 const APInt &L = LC->getAPInt(); 6939 const APInt &M = MC->getAPInt(); 6940 const APInt &N = NC->getAPInt(); 6941 APInt Two(BitWidth, 2); 6942 APInt Four(BitWidth, 4); 6943 6944 { 6945 using namespace APIntOps; 6946 const APInt& C = L; 6947 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 6948 // The B coefficient is M-N/2 6949 APInt B(M); 6950 B -= sdiv(N,Two); 6951 6952 // The A coefficient is N/2 6953 APInt A(N.sdiv(Two)); 6954 6955 // Compute the B^2-4ac term. 6956 APInt SqrtTerm(B); 6957 SqrtTerm *= B; 6958 SqrtTerm -= Four * (A * C); 6959 6960 if (SqrtTerm.isNegative()) { 6961 // The loop is provably infinite. 6962 const SCEV *CNC = SE.getCouldNotCompute(); 6963 return {CNC, CNC}; 6964 } 6965 6966 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 6967 // integer value or else APInt::sqrt() will assert. 6968 APInt SqrtVal(SqrtTerm.sqrt()); 6969 6970 // Compute the two solutions for the quadratic formula. 6971 // The divisions must be performed as signed divisions. 6972 APInt NegB(-B); 6973 APInt TwoA(A << 1); 6974 if (TwoA.isMinValue()) { 6975 const SCEV *CNC = SE.getCouldNotCompute(); 6976 return {CNC, CNC}; 6977 } 6978 6979 LLVMContext &Context = SE.getContext(); 6980 6981 ConstantInt *Solution1 = 6982 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); 6983 ConstantInt *Solution2 = 6984 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); 6985 6986 return {SE.getConstant(Solution1), SE.getConstant(Solution2)}; 6987 } // end APIntOps namespace 6988 } 6989 6990 /// HowFarToZero - Return the number of times a backedge comparing the specified 6991 /// value to zero will execute. If not computable, return CouldNotCompute. 6992 /// 6993 /// This is only used for loops with a "x != y" exit test. The exit condition is 6994 /// now expressed as a single expression, V = x-y. So the exit test is 6995 /// effectively V != 0. 
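/// For example, for {4,+,-1} the value sequence is 4, 3, 2, 1, 0, so the
/// backedge executes exactly 4 times before V becomes zero.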
/// We know and take advantage of the fact that this 6996 /// expression is only used in a comparison-with-zero context. 6997 ScalarEvolution::ExitLimit 6998 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, 6999 bool AllowPredicates) { 7000 SCEVUnionPredicate P; 7001 // If the value is a constant. 7002 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7003 // If the value is already zero, the branch will execute zero times. 7004 if (C->getValue()->isZero()) return C; 7005 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7006 } 7007 7008 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 7009 if (!AddRec && AllowPredicates) 7010 // Try to make this an AddRec using runtime tests, in the first X 7011 // iterations of this loop, where X is the SCEV expression found by the 7012 // algorithm below. 7013 AddRec = convertSCEVToAddRecWithPredicates(V, L, P); 7014 7015 if (!AddRec || AddRec->getLoop() != L) 7016 return getCouldNotCompute(); 7017 7018 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 7019 // the quadratic equation to solve it. 7020 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 7021 std::pair<const SCEV *,const SCEV *> Roots = 7022 SolveQuadraticEquation(AddRec, *this); 7023 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 7024 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 7025 if (R1 && R2) { 7026 // Pick the smallest positive root value. 7027 if (ConstantInt *CB = 7028 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT, 7029 R1->getValue(), 7030 R2->getValue()))) { 7031 if (!CB->getZExtValue()) 7032 std::swap(R1, R2); // R1 is the minimum root now. 7033 7034 // We can only use this value if the chrec ends up with an exact zero 7035 // value at this index. When solving for "X*X != 5", for example, we 7036 // should not accept a root of 2. 7037 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 7038 if (Val->isZero()) 7039 return ExitLimit(R1, R1, P); // We found a quadratic root! 7040 } 7041 } 7042 return getCouldNotCompute(); 7043 } 7044 7045 // Otherwise we can only handle this if it is affine. 7046 if (!AddRec->isAffine()) 7047 return getCouldNotCompute(); 7048 7049 // If this is an affine expression, the execution count of this branch is 7050 // the minimum unsigned root of the following equation: 7051 // 7052 // Start + Step*N = 0 (mod 2^BW) 7053 // 7054 // equivalent to: 7055 // 7056 // Step*N = -Start (mod 2^BW) 7057 // 7058 // where BW is the common bit width of Start and Step. 7059 7060 // Get the initial value for the loop. 7061 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 7062 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 7063 7064 // For now we handle only constant steps. 7065 // 7066 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 7067 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 7068 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 7069 // We have not yet seen any such cases. 7070 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 7071 if (!StepC || StepC->getValue()->equalsInt(0)) 7072 return getCouldNotCompute(); 7073 7074 // For positive steps (counting up until unsigned overflow): 7075 // N = -Start/Step (as unsigned) 7076 // For negative steps (counting down to zero): 7077 // N = Start/-Step 7078 // First compute the unsigned distance from zero in the direction of Step.
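// For example, for {10,+,-1} we count down and Distance = Start = 10; for
// {-10,+,1} we count up and Distance = -Start = 10 (as unsigned values).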
7079 bool CountDown = StepC->getAPInt().isNegative(); 7080 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 7081 7082 // Handle unitary steps, which cannot wrap around. 7083 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 7084 // N = Distance (as unsigned) 7085 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) { 7086 ConstantRange CR = getUnsignedRange(Start); 7087 const SCEV *MaxBECount; 7088 if (!CountDown && CR.getUnsignedMin().isMinValue()) 7089 // When counting up, the worst starting value is 1, not 0. 7090 MaxBECount = CR.getUnsignedMax().isMinValue() 7091 ? getConstant(APInt::getMinValue(CR.getBitWidth())) 7092 : getConstant(APInt::getMaxValue(CR.getBitWidth())); 7093 else 7094 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax() 7095 : -CR.getUnsignedMin()); 7096 return ExitLimit(Distance, MaxBECount, P); 7097 } 7098 7099 // As a special case, handle the instance where Step is a positive power of 7100 // two. In this case, determining whether Step divides Distance evenly can be 7101 // done by counting and comparing the number of trailing zeros of Step and 7102 // Distance. 7103 if (!CountDown) { 7104 const APInt &StepV = StepC->getAPInt(); 7105 // StepV.isPowerOf2() returns true if StepV is a positive power of two. It 7106 // also returns true if StepV is maximally negative (e.g., INT_MIN), but that 7107 // case is not handled as this code is guarded by !CountDown. 7108 if (StepV.isPowerOf2() && 7109 GetMinTrailingZeros(Distance) >= StepV.countTrailingZeros()) { 7110 // Here we've constrained the equation to be of the form 7111 // 7112 // 2^(N + k) * Distance' = (StepV == 2^N) * X (mod 2^W) ... (0) 7113 // 7114 // where we're operating on a W bit wide integer domain and k is 7115 // non-negative. The smallest unsigned solution for X is the trip count. 7116 // 7117 // (0) is equivalent to: 7118 // 7119 // 2^(N + k) * Distance' - 2^N * X = L * 2^W 7120 // <=> 2^N(2^k * Distance' - X) = L * 2^(W - N) * 2^N 7121 // <=> 2^k * Distance' - X = L * 2^(W - N) 7122 // <=> 2^k * Distance' = L * 2^(W - N) + X ... (1) 7123 // 7124 // The smallest X satisfying (1) is the unsigned remainder of dividing the LHS 7125 // by 2^(W - N). 7126 // 7127 // <=> X = 2^k * Distance' URem 2^(W - N) ... (2) 7128 // 7129 // E.g. say we're solving 7130 // 7131 // 2 * Val = 2 * X (in i8) ... (3) 7132 // 7133 // then from (2), we get X = Val URem i8 128 (k = 0 in this case). 7134 // 7135 // Note: It is tempting to solve (3) by setting X = Val, but Val is not 7136 // necessarily the smallest unsigned value of X that satisfies (3). 7137 // E.g. if Val is i8 -127 then the smallest value of X that satisfies (3) 7138 // is i8 1, not i8 -127. 7139 7140 const auto *ModuloResult = getUDivExactExpr(Distance, Step); 7141 7142 // Since SCEV does not have a URem node, we construct one using a truncate 7143 // and a zero extend. 7144 7145 unsigned NarrowWidth = StepV.getBitWidth() - StepV.countTrailingZeros(); 7146 auto *NarrowTy = IntegerType::get(getContext(), NarrowWidth); 7147 auto *WideTy = Distance->getType(); 7148 7149 const SCEV *Limit = 7150 getZeroExtendExpr(getTruncateExpr(ModuloResult, NarrowTy), WideTy); 7151 return ExitLimit(Limit, Limit, P); 7152 } 7153 } 7154 7155 // If the condition controls loop exit (the loop exits only if the expression 7156 // is true) and the addition is no-wrap we can use unsigned divide to 7157 // compute the backedge count.
In this case, the step may not divide the 7158 // distance, but we don't care because if the condition is "missed" the loop 7159 // will have undefined behavior due to wrapping. 7160 if (ControlsExit && AddRec->hasNoSelfWrap()) { 7161 const SCEV *Exact = 7162 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 7163 return ExitLimit(Exact, Exact, P); 7164 } 7165 7166 // Then, try to solve the above equation provided that Start is constant. 7167 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) { 7168 const SCEV *E = SolveLinEquationWithOverflow( 7169 StepC->getValue()->getValue(), -StartC->getValue()->getValue(), *this); 7170 return ExitLimit(E, E, P); 7171 } 7172 return getCouldNotCompute(); 7173 } 7174 7175 /// HowFarToNonZero - Return the number of times a backedge checking the 7176 /// specified value for nonzero will execute. If not computable, return 7177 /// CouldNotCompute 7178 ScalarEvolution::ExitLimit 7179 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 7180 // Loops that look like: while (X == 0) are very strange indeed. We don't 7181 // handle them yet except for the trivial case. This could be expanded in the 7182 // future as needed. 7183 7184 // If the value is a constant, check to see if it is known to be non-zero 7185 // already. If so, the backedge will execute zero times. 7186 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 7187 if (!C->getValue()->isNullValue()) 7188 return getZero(C->getType()); 7189 return getCouldNotCompute(); // Otherwise it will loop infinitely. 7190 } 7191 7192 // We could implement others, but I really doubt anyone writes loops like 7193 // this, and if they did, they would already be constant folded. 7194 return getCouldNotCompute(); 7195 } 7196 7197 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 7198 /// (which may not be an immediate predecessor) which has exactly one 7199 /// successor from which BB is reachable, or null if no such block is 7200 /// found. 7201 /// 7202 std::pair<BasicBlock *, BasicBlock *> 7203 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 7204 // If the block has a unique predecessor, then there is no path from the 7205 // predecessor to the block that does not go through the direct edge 7206 // from the predecessor to the block. 7207 if (BasicBlock *Pred = BB->getSinglePredecessor()) 7208 return {Pred, BB}; 7209 7210 // A loop's header is defined to be a block that dominates the loop. 7211 // If the header has a unique predecessor outside the loop, it must be 7212 // a block that has exactly one successor that can reach the loop. 7213 if (Loop *L = LI.getLoopFor(BB)) 7214 return {L->getLoopPredecessor(), L->getHeader()}; 7215 7216 return {nullptr, nullptr}; 7217 } 7218 7219 /// HasSameValue - SCEV structural equivalence is usually sufficient for 7220 /// testing whether two expressions are equal, however for the purposes of 7221 /// looking for a condition guarding a loop, it can be useful to be a little 7222 /// more general, since a front-end may have replicated the controlling 7223 /// expression. 7224 /// 7225 static bool HasSameValue(const SCEV *A, const SCEV *B) { 7226 // Quick check to see if they are the same SCEV. 7227 if (A == B) return true; 7228 7229 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 7230 // Not all instructions that are "identical" compute the same value. 
For 7231 // instance, two distinct alloca instructions allocating the same type are 7232 // identical and do not read memory, but compute distinct values. 7233 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 7234 }; 7235 7236 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 7237 // two different instructions with the same value. Check for this case. 7238 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 7239 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 7240 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 7241 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 7242 if (ComputesEqualValues(AI, BI)) 7243 return true; 7244 7245 // Otherwise assume they may have a different value. 7246 return false; 7247 } 7248 7249 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with 7250 /// predicate Pred. Return true iff any changes were made. 7251 /// 7252 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 7253 const SCEV *&LHS, const SCEV *&RHS, 7254 unsigned Depth) { 7255 bool Changed = false; 7256 7257 // If we hit the max recursion limit, bail out. 7258 if (Depth >= 3) 7259 return false; 7260 7261 // Canonicalize a constant to the right side. 7262 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 7263 // Check for both operands constant. 7264 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 7265 if (ConstantExpr::getICmp(Pred, 7266 LHSC->getValue(), 7267 RHSC->getValue())->isNullValue()) 7268 goto trivially_false; 7269 else 7270 goto trivially_true; 7271 } 7272 // Otherwise swap the operands to put the constant on the right. 7273 std::swap(LHS, RHS); 7274 Pred = ICmpInst::getSwappedPredicate(Pred); 7275 Changed = true; 7276 } 7277 7278 // If we're comparing an addrec with a value which is loop-invariant in the 7279 // addrec's loop, put the addrec on the left. Also make a dominance check, 7280 // as both operands could be addrecs loop-invariant in each other's loop. 7281 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 7282 const Loop *L = AR->getLoop(); 7283 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 7284 std::swap(LHS, RHS); 7285 Pred = ICmpInst::getSwappedPredicate(Pred); 7286 Changed = true; 7287 } 7288 } 7289 7290 // If there's a constant operand, canonicalize comparisons with boundary 7291 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 7292 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 7293 const APInt &RA = RC->getAPInt(); 7294 switch (Pred) { 7295 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 7296 case ICmpInst::ICMP_EQ: 7297 case ICmpInst::ICMP_NE: 7298 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
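// In SCEV form that left-hand side is an add expression whose first
// operand is a multiply by all-ones, which is exactly the shape matched
// below.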
7299 if (!RA) 7300 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 7301 if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 7302 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 7303 ME->getOperand(0)->isAllOnesValue()) { 7304 RHS = AE->getOperand(1); 7305 LHS = ME->getOperand(1); 7306 Changed = true; 7307 } 7308 break; 7309 case ICmpInst::ICMP_UGE: 7310 if ((RA - 1).isMinValue()) { 7311 Pred = ICmpInst::ICMP_NE; 7312 RHS = getConstant(RA - 1); 7313 Changed = true; 7314 break; 7315 } 7316 if (RA.isMaxValue()) { 7317 Pred = ICmpInst::ICMP_EQ; 7318 Changed = true; 7319 break; 7320 } 7321 if (RA.isMinValue()) goto trivially_true; 7322 7323 Pred = ICmpInst::ICMP_UGT; 7324 RHS = getConstant(RA - 1); 7325 Changed = true; 7326 break; 7327 case ICmpInst::ICMP_ULE: 7328 if ((RA + 1).isMaxValue()) { 7329 Pred = ICmpInst::ICMP_NE; 7330 RHS = getConstant(RA + 1); 7331 Changed = true; 7332 break; 7333 } 7334 if (RA.isMinValue()) { 7335 Pred = ICmpInst::ICMP_EQ; 7336 Changed = true; 7337 break; 7338 } 7339 if (RA.isMaxValue()) goto trivially_true; 7340 7341 Pred = ICmpInst::ICMP_ULT; 7342 RHS = getConstant(RA + 1); 7343 Changed = true; 7344 break; 7345 case ICmpInst::ICMP_SGE: 7346 if ((RA - 1).isMinSignedValue()) { 7347 Pred = ICmpInst::ICMP_NE; 7348 RHS = getConstant(RA - 1); 7349 Changed = true; 7350 break; 7351 } 7352 if (RA.isMaxSignedValue()) { 7353 Pred = ICmpInst::ICMP_EQ; 7354 Changed = true; 7355 break; 7356 } 7357 if (RA.isMinSignedValue()) goto trivially_true; 7358 7359 Pred = ICmpInst::ICMP_SGT; 7360 RHS = getConstant(RA - 1); 7361 Changed = true; 7362 break; 7363 case ICmpInst::ICMP_SLE: 7364 if ((RA + 1).isMaxSignedValue()) { 7365 Pred = ICmpInst::ICMP_NE; 7366 RHS = getConstant(RA + 1); 7367 Changed = true; 7368 break; 7369 } 7370 if (RA.isMinSignedValue()) { 7371 Pred = ICmpInst::ICMP_EQ; 7372 Changed = true; 7373 break; 7374 } 7375 if (RA.isMaxSignedValue()) goto trivially_true; 7376 7377 Pred = ICmpInst::ICMP_SLT; 7378 RHS = getConstant(RA + 1); 7379 Changed = true; 7380 break; 7381 case ICmpInst::ICMP_UGT: 7382 if (RA.isMinValue()) { 7383 Pred = ICmpInst::ICMP_NE; 7384 Changed = true; 7385 break; 7386 } 7387 if ((RA + 1).isMaxValue()) { 7388 Pred = ICmpInst::ICMP_EQ; 7389 RHS = getConstant(RA + 1); 7390 Changed = true; 7391 break; 7392 } 7393 if (RA.isMaxValue()) goto trivially_false; 7394 break; 7395 case ICmpInst::ICMP_ULT: 7396 if (RA.isMaxValue()) { 7397 Pred = ICmpInst::ICMP_NE; 7398 Changed = true; 7399 break; 7400 } 7401 if ((RA - 1).isMinValue()) { 7402 Pred = ICmpInst::ICMP_EQ; 7403 RHS = getConstant(RA - 1); 7404 Changed = true; 7405 break; 7406 } 7407 if (RA.isMinValue()) goto trivially_false; 7408 break; 7409 case ICmpInst::ICMP_SGT: 7410 if (RA.isMinSignedValue()) { 7411 Pred = ICmpInst::ICMP_NE; 7412 Changed = true; 7413 break; 7414 } 7415 if ((RA + 1).isMaxSignedValue()) { 7416 Pred = ICmpInst::ICMP_EQ; 7417 RHS = getConstant(RA + 1); 7418 Changed = true; 7419 break; 7420 } 7421 if (RA.isMaxSignedValue()) goto trivially_false; 7422 break; 7423 case ICmpInst::ICMP_SLT: 7424 if (RA.isMaxSignedValue()) { 7425 Pred = ICmpInst::ICMP_NE; 7426 Changed = true; 7427 break; 7428 } 7429 if ((RA - 1).isMinSignedValue()) { 7430 Pred = ICmpInst::ICMP_EQ; 7431 RHS = getConstant(RA - 1); 7432 Changed = true; 7433 break; 7434 } 7435 if (RA.isMinSignedValue()) goto trivially_false; 7436 break; 7437 } 7438 } 7439 7440 // Check for obvious equality. 
7441 if (HasSameValue(LHS, RHS)) { 7442 if (ICmpInst::isTrueWhenEqual(Pred)) 7443 goto trivially_true; 7444 if (ICmpInst::isFalseWhenEqual(Pred)) 7445 goto trivially_false; 7446 } 7447 7448 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 7449 // adding or subtracting 1 from one of the operands. 7450 switch (Pred) { 7451 case ICmpInst::ICMP_SLE: 7452 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 7453 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7454 SCEV::FlagNSW); 7455 Pred = ICmpInst::ICMP_SLT; 7456 Changed = true; 7457 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 7458 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 7459 SCEV::FlagNSW); 7460 Pred = ICmpInst::ICMP_SLT; 7461 Changed = true; 7462 } 7463 break; 7464 case ICmpInst::ICMP_SGE: 7465 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 7466 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 7467 SCEV::FlagNSW); 7468 Pred = ICmpInst::ICMP_SGT; 7469 Changed = true; 7470 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 7471 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7472 SCEV::FlagNSW); 7473 Pred = ICmpInst::ICMP_SGT; 7474 Changed = true; 7475 } 7476 break; 7477 case ICmpInst::ICMP_ULE: 7478 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 7479 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 7480 SCEV::FlagNUW); 7481 Pred = ICmpInst::ICMP_ULT; 7482 Changed = true; 7483 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 7484 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 7485 Pred = ICmpInst::ICMP_ULT; 7486 Changed = true; 7487 } 7488 break; 7489 case ICmpInst::ICMP_UGE: 7490 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 7491 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 7492 Pred = ICmpInst::ICMP_UGT; 7493 Changed = true; 7494 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 7495 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 7496 SCEV::FlagNUW); 7497 Pred = ICmpInst::ICMP_UGT; 7498 Changed = true; 7499 } 7500 break; 7501 default: 7502 break; 7503 } 7504 7505 // TODO: More simplifications are possible here. 7506 7507 // Recursively simplify until we either hit a recursion limit or nothing 7508 // changes. 7509 if (Changed) 7510 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 7511 7512 return Changed; 7513 7514 trivially_true: 7515 // Return 0 == 0. 7516 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 7517 Pred = ICmpInst::ICMP_EQ; 7518 return true; 7519 7520 trivially_false: 7521 // Return 0 != 0. 
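// (Both sides become the same constant, so an is-false-when-equal
// predicate like ICMP_NE always evaluates to false.)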
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_NE;
  return true;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRange(S).getSignedMax().isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRange(S).getSignedMin().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRange(S).getSignedMin().isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If LHS or RHS is an addrec, check to see if the condition is true in
  // every iteration of the loop.
  // If LHS and RHS are both addrecs, both conditions must be true in
  // every iteration of the loop.
  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  bool LeftGuarded = false;
  bool RightGuarded = false;
  if (LAR) {
    const Loop *L = LAR->getLoop();
    if (isLoopEntryGuardedByCond(L, Pred, LAR->getStart(), RHS) &&
        isLoopBackedgeGuardedByCond(L, Pred, LAR->getPostIncExpr(*this), RHS)) {
      if (!RAR) return true;
      LeftGuarded = true;
    }
  }
  if (RAR) {
    const Loop *L = RAR->getLoop();
    if (isLoopEntryGuardedByCond(L, Pred, LHS, RAR->getStart()) &&
        isLoopBackedgeGuardedByCond(L, Pred, LHS, RAR->getPostIncExpr(*this))) {
      if (!LAR) return true;
      RightGuarded = true;
    }
  }
  if (LeftGuarded && RightGuarded)
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with known constant ranges.
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS);
}

bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}

bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates); all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }

  }

  llvm_unreachable("switch has default clause!");
}

bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.
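  //
  // For example (illustrative ranges only): if getUnsignedRange(LHS) is
  // [0, 8) and getUnsignedRange(RHS) is [8, 16), then LHS u< RHS must hold,
  // because makeSatisfyingICmpRegion(ICMP_ULT, [8, 16)) is [0, 8), which
  // contains every value LHS can take.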

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {

  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *NonConstOp, *ConstOp;
    SCEV::NoWrapFlags FlagsPresent;

    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
      return false;

    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
  };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
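  //
  // For example (illustrative values only): with i8 operands, if L is known
  // to be in [1, 10), then for any I the test I `ult` L holds exactly when
  // 0 s<= I s< L, since a negative I reinterprets as a large unsigned value
  // (i8 -1 is 255) and so fails both sides of the equivalence.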
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree.  These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {

    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge.  This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.  The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}

/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS.  This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (isKnownPredicateViaConstantRanges(Pred, LHS, RHS))
    return true;

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(Pred, LHS, RHS,
                      LoopEntryPredicate->getCondition(),
                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

namespace {
/// RAII wrapper to prevent recursive application of isImpliedCond.
/// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
/// currently evaluating isImpliedCond.
struct MarkPendingLoopPredicate {
  Value *Cond;
  DenseSet<Value*> &LoopPreds;
  bool Pending;

  MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
    : Cond(C), LoopPreds(LP) {
    Pending = !LoopPreds.insert(Cond).second;
  }
  ~MarkPendingLoopPredicate() {
    if (!Pending)
      LoopPreds.erase(Cond);
  }
};
} // end anonymous namespace

/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
  if (Mark.Pending)
    return false;

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we've found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
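  // For instance, if the query is "LHS u< RHS" but the guard proved
  // "RHS u> LHS", swapping the operands (and the predicate) of one of the two
  // comparisons lines them up for the checks below.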
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRange(V).getSignedMin() : getUnsignedRange(V).getUnsignedMin();

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin.  If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        // fall through

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min).  We know from the guarding condition that !(V
        // == Min).  This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        // fall through

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
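  // For instance, knowing "X == 5" is more than enough to establish
  // "X u<= 5", and a strict fact such as "X u< Y" is more than enough to
  // establish "X != Y".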
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

bool ScalarEvolution::computeConstantDifference(const SCEV *Less,
                                                const SCEV *More,
                                                APInt &C) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return false;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return false;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return false;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    C = M - L;
    return true;
  }

  const SCEV *L, *R;
  SCEV::NoWrapFlags Flags;
  if (splitBinaryAdd(Less, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == More) {
        C = -(LC->getAPInt());
        return true;
      }

  if (splitBinaryAdd(More, L, R, Flags))
    if (const auto *LC = dyn_cast<SCEVConstant>(L))
      if (R == Less) {
        C = LC->getAPInt();
        return true;
      }

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  // ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  //   <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C     [ using (3) ]
  //   <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C)   [ using (1) ]
  //   <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //        (FoundRHS + INT_MIN + C + INT_MIN)                     [ using (3) ]
  //   <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  APInt LDiff, RDiff;
  if (!computeConstantDifference(FoundLHS, LHS, LDiff) ||
      !computeConstantDifference(FoundRHS, RHS, RDiff) ||
      LDiff != RDiff)
    return false;

  if (LDiff == 0)
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -RDiff;
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}


/// If Expr computes ~A, return A, else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}


/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
                              const SCEV *Candidate) {
  const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  if (!MaxExpr) return false;

  return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}


/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
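/// SCEV has no dedicated min expression, so a min is recognized here as the
/// bitwise-not of a max of bitwise-nots: min(A, B) == ~max(~A, ~B).  That is
/// what MatchNotExpr above is used to peel off.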
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
                              const SCEV *MaybeMinExpr,
                              const SCEV *Candidate) {
  const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  if (!MaybeMaxExpr)
    return false;

  return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {

  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    // fall through
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
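///
/// The checks below chain the operand inequalities together; for a signed
/// less-than query, for instance, establishing LHS s<= FoundLHS and
/// RHS s>= FoundRHS gives LHS s<= FoundLHS s< FoundRHS s<= RHS.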
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  auto IsKnownPredicateFull =
      [this](ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
    return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
           IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
           IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
           isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
  };

  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (IsKnownPredicateFull(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        IsKnownPredicateFull(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (IsKnownPredicateFull(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        IsKnownPredicateFull(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (IsKnownPredicateFull(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        IsKnownPredicateFull(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (IsKnownPredicateFull(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        IsKnownPredicateFull(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}

/// isImpliedCondOperandsViaRanges - helper function for isImpliedCondOperands.
/// Tries to get cases like "X `sgt` 0 => X - 1 `sgt` -1".
bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  const SCEVAddExpr *AddLHS = dyn_cast<SCEVAddExpr>(LHS);
  if (!AddLHS || AddLHS->getOperand(1) != FoundLHS ||
      !isa<SCEVConstant>(AddLHS->getOperand(0)))
    return false;

  APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `AddLHS->getOperand(0)`, we can compute a range
  // for `LHS`:
  APInt Addend = cast<SCEVConstant>(AddLHS->getOperand(0))->getAPInt();
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
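  //
  // Walking the docstring example "X `sgt` 0 => X - 1 `sgt` -1" through this
  // in i8: the antecedent puts `FoundLHS` = X in [1, 127]; adding the addend
  // -1 gives `LHS` = X - 1 in [0, 126]; and every value in [0, 126] satisfies
  // "`LHS` s> -1", so the implication holds.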
  return SatisfyingLHSRange.contains(LHSRange);
}

// Verify if a linear IV with positive stride can overflow when in a
// less-than comparison, knowing the invariant term of the comparison, the
// stride and the knowledge of NSW/NUW flags on the recurrence.
bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRange(RHS).getSignedMax();
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
                                .getSignedMax();

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
                              .getUnsignedMax();

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
}

// Verify if a linear IV with negative stride can overflow when in a
// greater-than comparison, knowing the invariant term of the comparison,
// the stride and the knowledge of NSW/NUW flags on the recurrence.
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRange(RHS).getSignedMin();
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
                                .getSignedMax();

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
                              .getUnsignedMax();

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
}

// Compute the backedge taken count knowing the interval difference, the
// stride and presence of the equality in the comparison.
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute.  If not computable, return
/// CouldNotCompute.
///
/// @param ControlsExit is true when the LHS < RHS condition directly controls
/// the branch (the loop exits only if the condition is true). In this case, we
/// can use NoWrapFlags to skip overflow checks.
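///
/// When the loop is entry-guarded so that End is simply RHS, the result below
/// reduces to the ceiling division (RHS - Start + Stride - 1) /u Stride from
/// computeBECount; e.g. for the IV {0,+,2} tested u< 7 this yields
/// (7 + 1) /u 2 = 4, matching the four successful tests at 0, 2, 4 and 6.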
ScalarEvolution::ExitLimit
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SCEVUnionPredicate P;
  // We handle only IV < Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, P);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) {
    const SCEV *Diff = getMinusSCEV(RHS, Start);
    // If we have NoWrap set, then we can assume that the increment won't
    // overflow, in which case if RHS - Start is a constant, we don't need to
    // do a max operation since we can just figure it out statically
    if (NoWrap && isa<SCEVConstant>(Diff)) {
      APInt D = cast<SCEVConstant>(Diff)->getAPInt();
      if (D.isNegative())
        End = Start;
    } else
      End = IsSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);

  APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
                            : getUnsignedRange(Start).getUnsignedMin();

  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
                             : getUnsignedRange(Stride).getUnsignedMin();

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
                         : APInt::getMaxValue(BitWidth) - (MinStride - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS. This is safe because in the other case (End - Start)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MaxEnd =
    IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
             : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);

  const SCEV *MaxBECount;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else
    MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, P);
}

ScalarEvolution::ExitLimit
ScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SCEVUnionPredicate P;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, P);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    const SCEV *Diff = getMinusSCEV(RHS, Start);
    // If we have NoWrap set, then we can assume that the increment won't
    // overflow, in which case if RHS - Start is a constant, we don't need to
    // do a max operation since we can just figure it out statically
    if (NoWrap && isa<SCEVConstant>(Diff)) {
      APInt D = cast<SCEVConstant>(Diff)->getAPInt();
      if (!D.isNegative())
        End = Start;
    } else
      End = IsSigned ? getSMinExpr(RHS, Start)
                     : getUMinExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
                            : getUnsignedRange(Start).getUnsignedMax();

  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
                             : getUnsignedRange(Stride).getUnsignedMin();

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
    IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
             : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);


  const SCEV *MaxBECount = getCouldNotCompute();
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else
    MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, P);
}

/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count.  If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt One(BitWidth,1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
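    // (For instance, solving {0,+,2} in the range [0, 5) gives End = 4 and
    // ExitVal = (4 + 2) /u 2 = 3; iteration 3 produces the value 6, the first
    // one outside [0, 5).)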
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
    // quadratic equation to solve it.  To do this, we must frame our problem in
    // terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
                                             // getNoWrapFlags(FlagNW)
                                             FlagAnyWrap);

    // Next, solve the constructed addrec
    auto Roots = SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
              ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration should
        // not be in the range, but the previous one should be.  When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
              ConstantInt::get(SE.getContext(), R1->getAPInt() + 1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getAPInt() - 1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

namespace {
struct FindUndefs {
  bool Found;
  FindUndefs() : Found(false) {}

  bool follow(const SCEV *S) {
    if (const SCEVUnknown *C = dyn_cast<SCEVUnknown>(S)) {
      if (isa<UndefValue>(C->getValue()))
        Found = true;
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
      if (isa<UndefValue>(C->getValue()))
        Found = true;
    }

    // Keep looking if we haven't found it yet.
    return !Found;
  }
  bool isDone() const {
    // Stop recursion if we have found an undef.
    return Found;
  }
};
}

// Return true when S contains at least one undef value.
static inline bool
containsUndefs(const SCEV *S) {
  FindUndefs F;
  SCEVTraversal<FindUndefs> ST(F);
  ST.visitAll(S);

  return F.Found;
}

namespace {
// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }
  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T)
      : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }
  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 +  %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        if (isa<SCEVUnknown>(Op)) {
          Operands.push_back(Op);
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
9072 return true;
9073 }
9074 bool isDone() const { return false; }
9075 };
9076 }
9077
9078 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
9079 /// two places:
9080 /// 1) The strides of AddRec expressions.
9081 /// 2) Unknowns that are multiplied with AddRec expressions.
9082 void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
9083 SmallVectorImpl<const SCEV *> &Terms) {
9084 SmallVector<const SCEV *, 4> Strides;
9085 SCEVCollectStrides StrideCollector(*this, Strides);
9086 visitAll(Expr, StrideCollector);
9087
9088 DEBUG({
9089 dbgs() << "Strides:\n";
9090 for (const SCEV *S : Strides)
9091 dbgs() << *S << "\n";
9092 });
9093
9094 for (const SCEV *S : Strides) {
9095 SCEVCollectTerms TermCollector(Terms);
9096 visitAll(S, TermCollector);
9097 }
9098
9099 DEBUG({
9100 dbgs() << "Terms:\n";
9101 for (const SCEV *T : Terms)
9102 dbgs() << *T << "\n";
9103 });
9104
9105 SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
9106 visitAll(Expr, MulCollector);
9107 }
9108
9109 static bool findArrayDimensionsRec(ScalarEvolution &SE,
9110 SmallVectorImpl<const SCEV *> &Terms,
9111 SmallVectorImpl<const SCEV *> &Sizes) {
9112 int Last = Terms.size() - 1;
9113 const SCEV *Step = Terms[Last];
9114
9115 // End of recursion.
9116 if (Last == 0) {
9117 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
9118 SmallVector<const SCEV *, 2> Qs;
9119 for (const SCEV *Op : M->operands())
9120 if (!isa<SCEVConstant>(Op))
9121 Qs.push_back(Op);
9122
9123 Step = SE.getMulExpr(Qs);
9124 }
9125
9126 Sizes.push_back(Step);
9127 return true;
9128 }
9129
9130 for (const SCEV *&Term : Terms) {
9131 // Normalize the terms before the next call to findArrayDimensionsRec.
9132 const SCEV *Q, *R;
9133 SCEVDivision::divide(SE, Term, Step, &Q, &R);
9134
9135 // Bail out when GCD does not evenly divide one of the terms.
9136 if (!R->isZero())
9137 return false;
9138
9139 Term = Q;
9140 }
9141
9142 // Remove all SCEVConstants.
9143 Terms.erase(std::remove_if(Terms.begin(), Terms.end(), [](const SCEV *E) {
9144 return isa<SCEVConstant>(E);
9145 }),
9146 Terms.end());
9147
9148 if (!Terms.empty())
9149 if (!findArrayDimensionsRec(SE, Terms, Sizes))
9150 return false;
9151
9152 Sizes.push_back(Step);
9153 return true;
9154 }
9155
9156 // Returns true when S contains at least a SCEVUnknown parameter.
9157 static inline bool
9158 containsParameters(const SCEV *S) {
9159 struct FindParameter {
9160 bool FoundParameter;
9161 FindParameter() : FoundParameter(false) {}
9162
9163 bool follow(const SCEV *S) {
9164 if (isa<SCEVUnknown>(S)) {
9165 FoundParameter = true;
9166 // Stop recursion: we found a parameter.
9167 return false;
9168 }
9169 // Keep looking.
9170 return true;
9171 }
9172 bool isDone() const {
9173 // Stop recursion if we have found a parameter.
9174 return FoundParameter;
9175 }
9176 };
9177
9178 FindParameter F;
9179 SCEVTraversal<FindParameter> ST(F);
9180 ST.visitAll(S);
9181
9182 return F.FoundParameter;
9183 }
9184
9185 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
9186 static inline bool
9187 containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
9188 for (const SCEV *T : Terms)
9189 if (containsParameters(T))
9190 return true;
9191 return false;
9192 }
9193
9194 // Return the number of product terms in S.
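// For example, a three-operand SCEVMulExpr such as (8 * %a * %b) counts as
// three product terms, while a non-multiply expression such as (%a + %b)
// counts as a single term.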
9195 static inline int numberOfTerms(const SCEV *S) {
9196 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
9197 return Expr->getNumOperands();
9198 return 1;
9199 }
9200
9201 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
9202 if (isa<SCEVConstant>(T))
9203 return nullptr;
9204
9205 if (isa<SCEVUnknown>(T))
9206 return T;
9207
9208 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
9209 SmallVector<const SCEV *, 2> Factors;
9210 for (const SCEV *Op : M->operands())
9211 if (!isa<SCEVConstant>(Op))
9212 Factors.push_back(Op);
9213
9214 return SE.getMulExpr(Factors);
9215 }
9216
9217 return T;
9218 }
9219
9220 /// Return the size of an element read or written by Inst.
9221 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
9222 Type *Ty;
9223 if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
9224 Ty = Store->getValueOperand()->getType();
9225 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
9226 Ty = Load->getType();
9227 else
9228 return nullptr;
9229
9230 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
9231 return getSizeOfExpr(ETy, Ty);
9232 }
9233
9234 /// Second step of delinearization: compute the array dimensions Sizes from the
9235 /// set of Terms extracted from the memory access function of this SCEVAddRec.
9236 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
9237 SmallVectorImpl<const SCEV *> &Sizes,
9238 const SCEV *ElementSize) const {
9239
9240 if (Terms.empty() || !ElementSize)
9241 return;
9242
9243 // Early return when Terms do not contain parameters: we do not delinearize
9244 // non-parametric SCEVs.
9245 if (!containsParameters(Terms))
9246 return;
9247
9248 DEBUG({
9249 dbgs() << "Terms:\n";
9250 for (const SCEV *T : Terms)
9251 dbgs() << *T << "\n";
9252 });
9253
9254 // Remove duplicates.
9255 std::sort(Terms.begin(), Terms.end());
9256 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
9257
9258 // Put larger terms first.
9259 std::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
9260 return numberOfTerms(LHS) > numberOfTerms(RHS);
9261 });
9262
9263 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
9264
9265 // Try to divide all terms by the element size; replace a term by the
9266 // quotient when the quotient is non-zero, otherwise keep the original term.
9267 for (const SCEV *&Term : Terms) {
9268 const SCEV *Q, *R;
9269 SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
9270 if (!Q->isZero())
9271 Term = Q;
9272 }
9273
9274 SmallVector<const SCEV *, 4> NewTerms;
9275
9276 // Remove constant factors.
9277 for (const SCEV *T : Terms)
9278 if (const SCEV *NewT = removeConstantFactors(SE, T))
9279 NewTerms.push_back(NewT);
9280
9281 DEBUG({
9282 dbgs() << "Terms after sorting:\n";
9283 for (const SCEV *T : NewTerms)
9284 dbgs() << *T << "\n";
9285 });
9286
9287 if (NewTerms.empty() ||
9288 !findArrayDimensionsRec(SE, NewTerms, Sizes)) {
9289 Sizes.clear();
9290 return;
9291 }
9292
9293 // The last element to be pushed into Sizes is the size of an element.
9294 Sizes.push_back(ElementSize);
9295
9296 DEBUG({
9297 dbgs() << "Sizes:\n";
9298 for (const SCEV *S : Sizes)
9299 dbgs() << *S << "\n";
9300 });
9301 }
9302
9303 /// Third step of delinearization: compute the access functions for the
9304 /// Subscripts based on the dimensions in Sizes.
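///
/// As an illustration (reusing the A[i][j][k] example documented before
/// delinearize below), given
///
///   Expr  = {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///   Sizes = [%m, %o, 8]
///
/// each division below peels off one subscript: dividing Expr by the element
/// size 8 separates out the base offset %A, dividing the quotient by %o
/// leaves the remainder {0,+,1}<%for.k>, dividing again by %m leaves
/// {0,+,1}<%for.j>, and the final quotient {0,+,1}<%for.i> becomes the
/// outermost subscript.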
9305 void ScalarEvolution::computeAccessFunctions(
9306 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
9307 SmallVectorImpl<const SCEV *> &Sizes) {
9308
9309 // Early exit in case this SCEV is not an affine multivariate function.
9310 if (Sizes.empty())
9311 return;
9312
9313 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
9314 if (!AR->isAffine())
9315 return;
9316
9317 const SCEV *Res = Expr;
9318 int Last = Sizes.size() - 1;
9319 for (int i = Last; i >= 0; i--) {
9320 const SCEV *Q, *R;
9321 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
9322
9323 DEBUG({
9324 dbgs() << "Res: " << *Res << "\n";
9325 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
9326 dbgs() << "Res divided by Sizes[i]:\n";
9327 dbgs() << "Quotient: " << *Q << "\n";
9328 dbgs() << "Remainder: " << *R << "\n";
9329 });
9330
9331 Res = Q;
9332
9333 // Do not record the last subscript corresponding to the size of elements in
9334 // the array.
9335 if (i == Last) {
9336
9337 // Bail out if the remainder is too complex.
9338 if (isa<SCEVAddRecExpr>(R)) {
9339 Subscripts.clear();
9340 Sizes.clear();
9341 return;
9342 }
9343
9344 continue;
9345 }
9346
9347 // Record the access function for the current subscript.
9348 Subscripts.push_back(R);
9349 }
9350
9351 // Also push the final quotient in last position: after the reversal below,
9352 // it becomes the access function of the outermost dimension.
9353 Subscripts.push_back(Res);
9354
9355 std::reverse(Subscripts.begin(), Subscripts.end());
9356
9357 DEBUG({
9358 dbgs() << "Subscripts:\n";
9359 for (const SCEV *S : Subscripts)
9360 dbgs() << *S << "\n";
9361 });
9362 }
9363
9364 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
9365 /// sizes of an array access. The remainder of the delinearization is the
9366 /// offset start of the array. The SCEV->delinearize algorithm computes the
9367 /// multiples of SCEV coefficients: this is a pattern match of subexpressions
9368 /// in the stride and base of a SCEV, corresponding to the computation of a
9369 /// GCD (greatest common divisor) of base and stride. When SCEV->delinearize
9370 /// fails, it returns the SCEV unchanged.
9371 ///
9372 /// For example, when analyzing the memory access A[i][j][k] in this loop nest
9373 ///
9374 /// void foo(long n, long m, long o, double A[n][m][o]) {
9375 ///
9376 /// for (long i = 0; i < n; i++)
9377 /// for (long j = 0; j < m; j++)
9378 /// for (long k = 0; k < o; k++)
9379 /// A[i][j][k] = 1.0;
9380 /// }
9381 ///
9382 /// the delinearization input is the following AddRec SCEV:
9383 ///
9384 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
9385 ///
9386 /// From this SCEV, we are able to say that the base offset of the access is %A
9387 /// because it appears as an offset that does not divide any of the strides in
9388 /// the loops:
9389 ///
9390 /// CHECK: Base offset: %A
9391 ///
9392 /// and then SCEV->delinearize determines the size of some of the dimensions of
9393 /// the array as these are the multiples by which the strides advance:
9394 ///
9395 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
9396 ///
9397 /// Note that the outermost dimension remains of UnknownSize because there are
9398 /// no strides that would help identify the size of the last dimension: when
9399 /// the array has been statically allocated, one could compute the size of that
9400 /// dimension by dividing the overall size of the array by the size of the known
9401 /// dimensions: %m * %o * 8.
9402 ///
9403 /// Finally, delinearize provides the access functions for the array reference
9404 /// that corresponds to A[i][j][k] in the above C testcase:
9405 ///
9406 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
9407 ///
9408 /// The testcases check the output of a function pass, DelinearizationPass,
9409 /// which walks through all loads and stores of a function asking for the
9410 /// SCEV of the memory access with respect to all enclosing loops, calling
9411 /// SCEV->delinearize on that and printing the results.
9412
9413 void ScalarEvolution::delinearize(const SCEV *Expr,
9414 SmallVectorImpl<const SCEV *> &Subscripts,
9415 SmallVectorImpl<const SCEV *> &Sizes,
9416 const SCEV *ElementSize) {
9417 // First step: collect parametric terms.
9418 SmallVector<const SCEV *, 4> Terms;
9419 collectParametricTerms(Expr, Terms);
9420
9421 if (Terms.empty())
9422 return;
9423
9424 // Second step: find subscript sizes.
9425 findArrayDimensions(Terms, Sizes, ElementSize);
9426
9427 if (Sizes.empty())
9428 return;
9429
9430 // Third step: compute the access functions for each subscript.
9431 computeAccessFunctions(Expr, Subscripts, Sizes);
9432
9433 if (Subscripts.empty())
9434 return;
9435
9436 DEBUG({
9437 dbgs() << "succeeded to delinearize " << *Expr << "\n";
9438 dbgs() << "ArrayDecl[UnknownSize]";
9439 for (const SCEV *S : Sizes)
9440 dbgs() << "[" << *S << "]";
9441
9442 dbgs() << "\nArrayRef";
9443 for (const SCEV *S : Subscripts)
9444 dbgs() << "[" << *S << "]";
9445 dbgs() << "\n";
9446 });
9447 }
9448
9449 //===----------------------------------------------------------------------===//
9450 // SCEVCallbackVH Class Implementation
9451 //===----------------------------------------------------------------------===//
9452
9453 void ScalarEvolution::SCEVCallbackVH::deleted() {
9454 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
9455 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
9456 SE->ConstantEvolutionLoopExitValue.erase(PN);
9457 SE->eraseValueFromMap(getValPtr());
9458 // this now dangles!
9459 }
9460
9461 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
9462 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
9463
9464 // Forget all the expressions associated with users of the old value,
9465 // so that future queries will recompute the expressions using the new
9466 // value.
9467 Value *Old = getValPtr();
9468 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
9469 SmallPtrSet<User *, 8> Visited;
9470 while (!Worklist.empty()) {
9471 User *U = Worklist.pop_back_val();
9472 // Deleting the Old value will cause this to dangle. Postpone
9473 // that until everything else is done.
9474 if (U == Old)
9475 continue;
9476 if (!Visited.insert(U).second)
9477 continue;
9478 if (PHINode *PN = dyn_cast<PHINode>(U))
9479 SE->ConstantEvolutionLoopExitValue.erase(PN);
9480 SE->eraseValueFromMap(U);
9481 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
9482 }
9483 // Delete the Old value.
9484 if (PHINode *PN = dyn_cast<PHINode>(Old))
9485 SE->ConstantEvolutionLoopExitValue.erase(PN);
9486 SE->eraseValueFromMap(Old);
9487 // this now dangles!
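// (Erasing Old from the value map destroys the SCEVCallbackVH that keys the
// map entry, which is the very object this method is running on, so no
// members may be touched past this point.)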
9488 } 9489 9490 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 9491 : CallbackVH(V), SE(se) {} 9492 9493 //===----------------------------------------------------------------------===// 9494 // ScalarEvolution Class Implementation 9495 //===----------------------------------------------------------------------===// 9496 9497 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 9498 AssumptionCache &AC, DominatorTree &DT, 9499 LoopInfo &LI) 9500 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 9501 CouldNotCompute(new SCEVCouldNotCompute()), 9502 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9503 ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), 9504 FirstUnknown(nullptr) { 9505 9506 // To use guards for proving predicates, we need to scan every instruction in 9507 // relevant basic blocks, and not just terminators. Doing this is a waste of 9508 // time if the IR does not actually contain any calls to 9509 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 9510 // 9511 // This pessimizes the case where a pass that preserves ScalarEvolution wants 9512 // to _add_ guards to the module when there weren't any before, and wants 9513 // ScalarEvolution to optimize based on those guards. For now we prefer to be 9514 // efficient in lieu of being smart in that rather obscure case. 9515 9516 auto *GuardDecl = F.getParent()->getFunction( 9517 Intrinsic::getName(Intrinsic::experimental_guard)); 9518 HasGuards = GuardDecl && !GuardDecl->use_empty(); 9519 } 9520 9521 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 9522 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 9523 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 9524 ValueExprMap(std::move(Arg.ValueExprMap)), 9525 WalkingBEDominatingConds(false), ProvingSplitPredicate(false), 9526 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 9527 PredicatedBackedgeTakenCounts( 9528 std::move(Arg.PredicatedBackedgeTakenCounts)), 9529 ConstantEvolutionLoopExitValue( 9530 std::move(Arg.ConstantEvolutionLoopExitValue)), 9531 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 9532 LoopDispositions(std::move(Arg.LoopDispositions)), 9533 BlockDispositions(std::move(Arg.BlockDispositions)), 9534 UnsignedRanges(std::move(Arg.UnsignedRanges)), 9535 SignedRanges(std::move(Arg.SignedRanges)), 9536 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 9537 UniquePreds(std::move(Arg.UniquePreds)), 9538 SCEVAllocator(std::move(Arg.SCEVAllocator)), 9539 FirstUnknown(Arg.FirstUnknown) { 9540 Arg.FirstUnknown = nullptr; 9541 } 9542 9543 ScalarEvolution::~ScalarEvolution() { 9544 // Iterate through all the SCEVUnknown instances and call their 9545 // destructors, so that they release their references to their values. 9546 for (SCEVUnknown *U = FirstUnknown; U;) { 9547 SCEVUnknown *Tmp = U; 9548 U = U->Next; 9549 Tmp->~SCEVUnknown(); 9550 } 9551 FirstUnknown = nullptr; 9552 9553 ExprValueMap.clear(); 9554 ValueExprMap.clear(); 9555 HasRecMap.clear(); 9556 9557 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 9558 // that a loop had multiple computable exits. 
9559 for (auto &BTCI : BackedgeTakenCounts) 9560 BTCI.second.clear(); 9561 for (auto &BTCI : PredicatedBackedgeTakenCounts) 9562 BTCI.second.clear(); 9563 9564 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 9565 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 9566 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 9567 } 9568 9569 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 9570 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 9571 } 9572 9573 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 9574 const Loop *L) { 9575 // Print all inner loops first 9576 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 9577 PrintLoopInfo(OS, SE, *I); 9578 9579 OS << "Loop "; 9580 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9581 OS << ": "; 9582 9583 SmallVector<BasicBlock *, 8> ExitBlocks; 9584 L->getExitBlocks(ExitBlocks); 9585 if (ExitBlocks.size() != 1) 9586 OS << "<multiple exits> "; 9587 9588 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 9589 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 9590 } else { 9591 OS << "Unpredictable backedge-taken count. "; 9592 } 9593 9594 OS << "\n" 9595 "Loop "; 9596 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9597 OS << ": "; 9598 9599 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 9600 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 9601 } else { 9602 OS << "Unpredictable max backedge-taken count. "; 9603 } 9604 9605 OS << "\n" 9606 "Loop "; 9607 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9608 OS << ": "; 9609 9610 SCEVUnionPredicate Pred; 9611 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 9612 if (!isa<SCEVCouldNotCompute>(PBT)) { 9613 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 9614 OS << " Predicates:\n"; 9615 Pred.print(OS, 4); 9616 } else { 9617 OS << "Unpredictable predicated backedge-taken count. "; 9618 } 9619 OS << "\n"; 9620 } 9621 9622 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 9623 switch (LD) { 9624 case ScalarEvolution::LoopVariant: 9625 return "Variant"; 9626 case ScalarEvolution::LoopInvariant: 9627 return "Invariant"; 9628 case ScalarEvolution::LoopComputable: 9629 return "Computable"; 9630 } 9631 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 9632 } 9633 9634 void ScalarEvolution::print(raw_ostream &OS) const { 9635 // ScalarEvolution's implementation of the print method is to print 9636 // out SCEV values of all instructions that are interesting. Doing 9637 // this potentially causes it to create new SCEV objects though, 9638 // which technically conflicts with the const qualifier. This isn't 9639 // observable from outside the class though, so casting away the 9640 // const isn't dangerous. 
9641 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 9642 9643 OS << "Classifying expressions for: "; 9644 F.printAsOperand(OS, /*PrintType=*/false); 9645 OS << "\n"; 9646 for (Instruction &I : instructions(F)) 9647 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 9648 OS << I << '\n'; 9649 OS << " --> "; 9650 const SCEV *SV = SE.getSCEV(&I); 9651 SV->print(OS); 9652 if (!isa<SCEVCouldNotCompute>(SV)) { 9653 OS << " U: "; 9654 SE.getUnsignedRange(SV).print(OS); 9655 OS << " S: "; 9656 SE.getSignedRange(SV).print(OS); 9657 } 9658 9659 const Loop *L = LI.getLoopFor(I.getParent()); 9660 9661 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 9662 if (AtUse != SV) { 9663 OS << " --> "; 9664 AtUse->print(OS); 9665 if (!isa<SCEVCouldNotCompute>(AtUse)) { 9666 OS << " U: "; 9667 SE.getUnsignedRange(AtUse).print(OS); 9668 OS << " S: "; 9669 SE.getSignedRange(AtUse).print(OS); 9670 } 9671 } 9672 9673 if (L) { 9674 OS << "\t\t" "Exits: "; 9675 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 9676 if (!SE.isLoopInvariant(ExitValue, L)) { 9677 OS << "<<Unknown>>"; 9678 } else { 9679 OS << *ExitValue; 9680 } 9681 9682 bool First = true; 9683 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 9684 if (First) { 9685 OS << "\t\t" "LoopDispositions: { "; 9686 First = false; 9687 } else { 9688 OS << ", "; 9689 } 9690 9691 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9692 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 9693 } 9694 9695 for (auto *InnerL : depth_first(L)) { 9696 if (InnerL == L) 9697 continue; 9698 if (First) { 9699 OS << "\t\t" "LoopDispositions: { "; 9700 First = false; 9701 } else { 9702 OS << ", "; 9703 } 9704 9705 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 9706 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 9707 } 9708 9709 OS << " }"; 9710 } 9711 9712 OS << "\n"; 9713 } 9714 9715 OS << "Determining loop execution counts for: "; 9716 F.printAsOperand(OS, /*PrintType=*/false); 9717 OS << "\n"; 9718 for (LoopInfo::iterator I = LI.begin(), E = LI.end(); I != E; ++I) 9719 PrintLoopInfo(OS, &SE, *I); 9720 } 9721 9722 ScalarEvolution::LoopDisposition 9723 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 9724 auto &Values = LoopDispositions[S]; 9725 for (auto &V : Values) { 9726 if (V.getPointer() == L) 9727 return V.getInt(); 9728 } 9729 Values.emplace_back(L, LoopVariant); 9730 LoopDisposition D = computeLoopDisposition(S, L); 9731 auto &Values2 = LoopDispositions[S]; 9732 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9733 if (V.getPointer() == L) { 9734 V.setInt(D); 9735 break; 9736 } 9737 } 9738 return D; 9739 } 9740 9741 ScalarEvolution::LoopDisposition 9742 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 9743 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9744 case scConstant: 9745 return LoopInvariant; 9746 case scTruncate: 9747 case scZeroExtend: 9748 case scSignExtend: 9749 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 9750 case scAddRecExpr: { 9751 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9752 9753 // If L is the addrec's loop, it's computable. 9754 if (AR->getLoop() == L) 9755 return LoopComputable; 9756 9757 // Add recurrences are never invariant in the function-body (null loop). 9758 if (!L) 9759 return LoopVariant; 9760 9761 // This recurrence is variant w.r.t. L if L contains AR's loop. 
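// (If L contains AR's loop, AR's loop re-executes within each iteration of
// L, so AR's value changes while L runs.)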
9762 if (L->contains(AR->getLoop())) 9763 return LoopVariant; 9764 9765 // This recurrence is invariant w.r.t. L if AR's loop contains L. 9766 if (AR->getLoop()->contains(L)) 9767 return LoopInvariant; 9768 9769 // This recurrence is variant w.r.t. L if any of its operands 9770 // are variant. 9771 for (auto *Op : AR->operands()) 9772 if (!isLoopInvariant(Op, L)) 9773 return LoopVariant; 9774 9775 // Otherwise it's loop-invariant. 9776 return LoopInvariant; 9777 } 9778 case scAddExpr: 9779 case scMulExpr: 9780 case scUMaxExpr: 9781 case scSMaxExpr: { 9782 bool HasVarying = false; 9783 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 9784 LoopDisposition D = getLoopDisposition(Op, L); 9785 if (D == LoopVariant) 9786 return LoopVariant; 9787 if (D == LoopComputable) 9788 HasVarying = true; 9789 } 9790 return HasVarying ? LoopComputable : LoopInvariant; 9791 } 9792 case scUDivExpr: { 9793 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9794 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 9795 if (LD == LoopVariant) 9796 return LoopVariant; 9797 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 9798 if (RD == LoopVariant) 9799 return LoopVariant; 9800 return (LD == LoopInvariant && RD == LoopInvariant) ? 9801 LoopInvariant : LoopComputable; 9802 } 9803 case scUnknown: 9804 // All non-instruction values are loop invariant. All instructions are loop 9805 // invariant if they are not contained in the specified loop. 9806 // Instructions are never considered invariant in the function body 9807 // (null loop) because they are defined within the "loop". 9808 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 9809 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant; 9810 return LoopInvariant; 9811 case scCouldNotCompute: 9812 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9813 } 9814 llvm_unreachable("Unknown SCEV kind!"); 9815 } 9816 9817 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 9818 return getLoopDisposition(S, L) == LoopInvariant; 9819 } 9820 9821 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 9822 return getLoopDisposition(S, L) == LoopComputable; 9823 } 9824 9825 ScalarEvolution::BlockDisposition 9826 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9827 auto &Values = BlockDispositions[S]; 9828 for (auto &V : Values) { 9829 if (V.getPointer() == BB) 9830 return V.getInt(); 9831 } 9832 Values.emplace_back(BB, DoesNotDominateBlock); 9833 BlockDisposition D = computeBlockDisposition(S, BB); 9834 auto &Values2 = BlockDispositions[S]; 9835 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 9836 if (V.getPointer() == BB) { 9837 V.setInt(D); 9838 break; 9839 } 9840 } 9841 return D; 9842 } 9843 9844 ScalarEvolution::BlockDisposition 9845 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 9846 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 9847 case scConstant: 9848 return ProperlyDominatesBlock; 9849 case scTruncate: 9850 case scZeroExtend: 9851 case scSignExtend: 9852 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 9853 case scAddRecExpr: { 9854 // This uses a "dominates" query instead of "properly dominates" query 9855 // to test for proper dominance too, because the instruction which 9856 // produces the addrec's value is a PHI, and a PHI effectively properly 9857 // dominates its entire containing block. 
9858 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 9859 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 9860 return DoesNotDominateBlock; 9861 } 9862 // FALL THROUGH into SCEVNAryExpr handling. 9863 case scAddExpr: 9864 case scMulExpr: 9865 case scUMaxExpr: 9866 case scSMaxExpr: { 9867 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 9868 bool Proper = true; 9869 for (const SCEV *NAryOp : NAry->operands()) { 9870 BlockDisposition D = getBlockDisposition(NAryOp, BB); 9871 if (D == DoesNotDominateBlock) 9872 return DoesNotDominateBlock; 9873 if (D == DominatesBlock) 9874 Proper = false; 9875 } 9876 return Proper ? ProperlyDominatesBlock : DominatesBlock; 9877 } 9878 case scUDivExpr: { 9879 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 9880 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 9881 BlockDisposition LD = getBlockDisposition(LHS, BB); 9882 if (LD == DoesNotDominateBlock) 9883 return DoesNotDominateBlock; 9884 BlockDisposition RD = getBlockDisposition(RHS, BB); 9885 if (RD == DoesNotDominateBlock) 9886 return DoesNotDominateBlock; 9887 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 9888 ProperlyDominatesBlock : DominatesBlock; 9889 } 9890 case scUnknown: 9891 if (Instruction *I = 9892 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 9893 if (I->getParent() == BB) 9894 return DominatesBlock; 9895 if (DT.properlyDominates(I->getParent(), BB)) 9896 return ProperlyDominatesBlock; 9897 return DoesNotDominateBlock; 9898 } 9899 return ProperlyDominatesBlock; 9900 case scCouldNotCompute: 9901 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 9902 } 9903 llvm_unreachable("Unknown SCEV kind!"); 9904 } 9905 9906 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 9907 return getBlockDisposition(S, BB) >= DominatesBlock; 9908 } 9909 9910 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 9911 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 9912 } 9913 9914 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 9915 // Search for a SCEV expression node within an expression tree. 9916 // Implements SCEVTraversal::Visitor. 9917 struct SCEVSearch { 9918 const SCEV *Node; 9919 bool IsFound; 9920 9921 SCEVSearch(const SCEV *N): Node(N), IsFound(false) {} 9922 9923 bool follow(const SCEV *S) { 9924 IsFound |= (S == Node); 9925 return !IsFound; 9926 } 9927 bool isDone() const { return IsFound; } 9928 }; 9929 9930 SCEVSearch Search(Op); 9931 visitAll(S, Search); 9932 return Search.IsFound; 9933 } 9934 9935 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 9936 ValuesAtScopes.erase(S); 9937 LoopDispositions.erase(S); 9938 BlockDispositions.erase(S); 9939 UnsignedRanges.erase(S); 9940 SignedRanges.erase(S); 9941 ExprValueMap.erase(S); 9942 HasRecMap.erase(S); 9943 9944 auto RemoveSCEVFromBackedgeMap = 9945 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 9946 for (auto I = Map.begin(), E = Map.end(); I != E;) { 9947 BackedgeTakenInfo &BEInfo = I->second; 9948 if (BEInfo.hasOperand(S, this)) { 9949 BEInfo.clear(); 9950 Map.erase(I++); 9951 } else 9952 ++I; 9953 } 9954 }; 9955 9956 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 9957 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 9958 } 9959 9960 typedef DenseMap<const Loop *, std::string> VerifyMap; 9961 9962 /// replaceSubString - Replaces all occurrences of From in Str with To. 
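/// For example, replaceSubString(S, "<nsw>", "") strips every "<nsw>" marker
/// from S, which is how the wrap flags are erased from the stringified
/// backedge-taken counts below.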
9963 static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
9964 size_t Pos = 0;
9965 while ((Pos = Str.find(From, Pos)) != std::string::npos) {
9966 Str.replace(Pos, From.size(), To.data(), To.size());
9967 Pos += To.size();
9968 }
9969 }
9970
9971 /// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
9972 static void
9973 getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
9974 std::string &S = Map[L];
9975 if (S.empty()) {
9976 raw_string_ostream OS(S);
9977 SE.getBackedgeTakenCount(L)->print(OS);
9978
9979 // false and 0 are semantically equivalent. This can happen in dead loops.
9980 replaceSubString(OS.str(), "false", "0");
9981 // Remove wrap flags, their use in SCEV is highly fragile.
9982 // FIXME: Remove this when SCEV gets smarter about them.
9983 replaceSubString(OS.str(), "<nw>", "");
9984 replaceSubString(OS.str(), "<nsw>", "");
9985 replaceSubString(OS.str(), "<nuw>", "");
9986 }
9987
9988 for (auto *R : reverse(*L))
9989 getLoopBackedgeTakenCounts(R, Map, SE); // recurse.
9990 }
9991
9992 void ScalarEvolution::verify() const {
9993 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
9994
9995 // Gather stringified backedge-taken counts for all loops using SCEV's caches.
9996 // FIXME: It would be much better to store actual values instead of strings,
9997 // but SCEV pointers will change if we drop the caches.
9998 VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
9999 for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
10000 getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);
10001
10002 // Gather stringified backedge-taken counts for all loops using a fresh
10003 // ScalarEvolution object.
10004 ScalarEvolution SE2(F, TLI, AC, DT, LI);
10005 for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
10006 getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);
10007
10008 // Now compare whether they're the same with and without caches. This allows
10009 // verifying that no pass changed the cache.
10010 assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
10011 "New loops suddenly appeared!");
10012
10013 for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
10014 OldE = BackedgeDumpsOld.end(),
10015 NewI = BackedgeDumpsNew.begin();
10016 OldI != OldE; ++OldI, ++NewI) {
10017 assert(OldI->first == NewI->first && "Loop order changed!");
10018
10019 // Compare the stringified SCEVs. We don't care if an undef backedge-taken
10020 // count changes.
10021 // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
10022 // means that a pass is buggy or SCEV has to learn a new pattern, but it is
10023 // usually not harmful.
10024 if (OldI->second != NewI->second &&
10025 OldI->second.find("undef") == std::string::npos &&
10026 NewI->second.find("undef") == std::string::npos &&
10027 OldI->second != "***COULDNOTCOMPUTE***" &&
10028 NewI->second != "***COULDNOTCOMPUTE***") {
10029 dbgs() << "SCEVValidator: SCEV for loop '"
10030 << OldI->first->getHeader()->getName()
10031 << "' changed from '" << OldI->second
10032 << "' to '" << NewI->second << "'!\n";
10033 std::abort();
10034 }
10035 }
10036
10037 // TODO: Verify more things.
10038 } 10039 10040 char ScalarEvolutionAnalysis::PassID; 10041 10042 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 10043 AnalysisManager<Function> &AM) { 10044 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 10045 AM.getResult<AssumptionAnalysis>(F), 10046 AM.getResult<DominatorTreeAnalysis>(F), 10047 AM.getResult<LoopAnalysis>(F)); 10048 } 10049 10050 PreservedAnalyses 10051 ScalarEvolutionPrinterPass::run(Function &F, AnalysisManager<Function> &AM) { 10052 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 10053 return PreservedAnalyses::all(); 10054 } 10055 10056 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 10057 "Scalar Evolution Analysis", false, true) 10058 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10059 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 10060 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 10061 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 10062 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 10063 "Scalar Evolution Analysis", false, true) 10064 char ScalarEvolutionWrapperPass::ID = 0; 10065 10066 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 10067 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 10068 } 10069 10070 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 10071 SE.reset(new ScalarEvolution( 10072 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 10073 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 10074 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 10075 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 10076 return false; 10077 } 10078 10079 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 10080 10081 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 10082 SE->print(OS); 10083 } 10084 10085 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 10086 if (!VerifySCEV) 10087 return; 10088 10089 SE->verify(); 10090 } 10091 10092 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 10093 AU.setPreservesAll(); 10094 AU.addRequiredTransitive<AssumptionCacheTracker>(); 10095 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 10096 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 10097 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 10098 } 10099 10100 const SCEVPredicate * 10101 ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS, 10102 const SCEVConstant *RHS) { 10103 FoldingSetNodeID ID; 10104 // Unique this node based on the arguments 10105 ID.AddInteger(SCEVPredicate::P_Equal); 10106 ID.AddPointer(LHS); 10107 ID.AddPointer(RHS); 10108 void *IP = nullptr; 10109 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10110 return S; 10111 SCEVEqualPredicate *Eq = new (SCEVAllocator) 10112 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 10113 UniquePreds.InsertNode(Eq, IP); 10114 return Eq; 10115 } 10116 10117 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 10118 const SCEVAddRecExpr *AR, 10119 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10120 FoldingSetNodeID ID; 10121 // Unique this node based on the arguments 10122 ID.AddInteger(SCEVPredicate::P_Wrap); 10123 ID.AddPointer(AR); 10124 ID.AddInteger(AddedFlags); 10125 void *IP = nullptr; 10126 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 10127 return S; 10128 auto *OF = new (SCEVAllocator) 10129 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 10130 
UniquePreds.InsertNode(OF, IP); 10131 return OF; 10132 } 10133 10134 namespace { 10135 10136 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 10137 public: 10138 // Rewrites \p S in the context of a loop L and the predicate A. 10139 // If Assume is true, rewrite is free to add further predicates to A 10140 // such that the result will be an AddRecExpr. 10141 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 10142 SCEVUnionPredicate &A, bool Assume) { 10143 SCEVPredicateRewriter Rewriter(L, SE, A, Assume); 10144 return Rewriter.visit(S); 10145 } 10146 10147 SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 10148 SCEVUnionPredicate &P, bool Assume) 10149 : SCEVRewriteVisitor(SE), P(P), L(L), Assume(Assume) {} 10150 10151 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 10152 auto ExprPreds = P.getPredicatesForExpr(Expr); 10153 for (auto *Pred : ExprPreds) 10154 if (const auto *IPred = dyn_cast<const SCEVEqualPredicate>(Pred)) 10155 if (IPred->getLHS() == Expr) 10156 return IPred->getRHS(); 10157 10158 return Expr; 10159 } 10160 10161 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 10162 const SCEV *Operand = visit(Expr->getOperand()); 10163 const SCEVAddRecExpr *AR = dyn_cast<const SCEVAddRecExpr>(Operand); 10164 if (AR && AR->getLoop() == L && AR->isAffine()) { 10165 // This couldn't be folded because the operand didn't have the nuw 10166 // flag. Add the nusw flag as an assumption that we could make. 10167 const SCEV *Step = AR->getStepRecurrence(SE); 10168 Type *Ty = Expr->getType(); 10169 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 10170 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 10171 SE.getSignExtendExpr(Step, Ty), L, 10172 AR->getNoWrapFlags()); 10173 } 10174 return SE.getZeroExtendExpr(Operand, Expr->getType()); 10175 } 10176 10177 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 10178 const SCEV *Operand = visit(Expr->getOperand()); 10179 const SCEVAddRecExpr *AR = dyn_cast<const SCEVAddRecExpr>(Operand); 10180 if (AR && AR->getLoop() == L && AR->isAffine()) { 10181 // This couldn't be folded because the operand didn't have the nsw 10182 // flag. Add the nssw flag as an assumption that we could make. 10183 const SCEV *Step = AR->getStepRecurrence(SE); 10184 Type *Ty = Expr->getType(); 10185 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 10186 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 10187 SE.getSignExtendExpr(Step, Ty), L, 10188 AR->getNoWrapFlags()); 10189 } 10190 return SE.getSignExtendExpr(Operand, Expr->getType()); 10191 } 10192 10193 private: 10194 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 10195 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 10196 auto *A = SE.getWrapPredicate(AR, AddedFlags); 10197 if (!Assume) { 10198 // Check if we've already made this assumption. 
10199 if (P.implies(A)) 10200 return true; 10201 return false; 10202 } 10203 P.add(A); 10204 return true; 10205 } 10206 10207 SCEVUnionPredicate &P; 10208 const Loop *L; 10209 bool Assume; 10210 }; 10211 } // end anonymous namespace 10212 10213 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 10214 SCEVUnionPredicate &Preds) { 10215 return SCEVPredicateRewriter::rewrite(S, L, *this, Preds, false); 10216 } 10217 10218 const SCEVAddRecExpr * 10219 ScalarEvolution::convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, 10220 SCEVUnionPredicate &Preds) { 10221 SCEVUnionPredicate TransformPreds; 10222 S = SCEVPredicateRewriter::rewrite(S, L, *this, TransformPreds, true); 10223 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 10224 10225 if (!AddRec) 10226 return nullptr; 10227 10228 // Since the transformation was successful, we can now transfer the SCEV 10229 // predicates. 10230 Preds.add(&TransformPreds); 10231 return AddRec; 10232 } 10233 10234 /// SCEV predicates 10235 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 10236 SCEVPredicateKind Kind) 10237 : FastID(ID), Kind(Kind) {} 10238 10239 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 10240 const SCEVUnknown *LHS, 10241 const SCEVConstant *RHS) 10242 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {} 10243 10244 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 10245 const auto *Op = dyn_cast<const SCEVEqualPredicate>(N); 10246 10247 if (!Op) 10248 return false; 10249 10250 return Op->LHS == LHS && Op->RHS == RHS; 10251 } 10252 10253 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 10254 10255 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 10256 10257 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 10258 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 10259 } 10260 10261 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 10262 const SCEVAddRecExpr *AR, 10263 IncrementWrapFlags Flags) 10264 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 10265 10266 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 10267 10268 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 10269 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 10270 10271 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 10272 } 10273 10274 bool SCEVWrapPredicate::isAlwaysTrue() const { 10275 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 10276 IncrementWrapFlags IFlags = Flags; 10277 10278 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 10279 IFlags = clearFlags(IFlags, IncrementNSSW); 10280 10281 return IFlags == IncrementAnyWrap; 10282 } 10283 10284 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 10285 OS.indent(Depth) << *getExpr() << " Added Flags: "; 10286 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 10287 OS << "<nusw>"; 10288 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 10289 OS << "<nssw>"; 10290 OS << "\n"; 10291 } 10292 10293 SCEVWrapPredicate::IncrementWrapFlags 10294 SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 10295 ScalarEvolution &SE) { 10296 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 10297 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 10298 10299 // We can safely transfer the NSW flag as NSSW. 
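// (FlagNSW guarantees that the whole recurrence never signed-overflows, which
// subsumes the weaker claim that its increment never signed-wraps.)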
10300 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
10301 ImpliedFlags = IncrementNSSW;
10302
10303 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
10304 // If the increment is positive, the SCEV NUW flag will also imply the
10305 // WrapPredicate NUSW flag.
10306 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
10307 if (Step->getValue()->getValue().isNonNegative())
10308 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
10309 }
10310
10311 return ImpliedFlags;
10312 }
10313
10314 /// Union predicates don't get cached, so create a dummy set ID for them.
10315 SCEVUnionPredicate::SCEVUnionPredicate()
10316 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}
10317
10318 bool SCEVUnionPredicate::isAlwaysTrue() const {
10319 return all_of(Preds,
10320 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
10321 }
10322
10323 ArrayRef<const SCEVPredicate *>
10324 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
10325 auto I = SCEVToPreds.find(Expr);
10326 if (I == SCEVToPreds.end())
10327 return ArrayRef<const SCEVPredicate *>();
10328 return I->second;
10329 }
10330
10331 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
10332 if (const auto *Set = dyn_cast<const SCEVUnionPredicate>(N))
10333 return all_of(Set->Preds,
10334 [this](const SCEVPredicate *I) { return this->implies(I); });
10335
10336 auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
10337 if (ScevPredsIt == SCEVToPreds.end())
10338 return false;
10339 auto &SCEVPreds = ScevPredsIt->second;
10340
10341 return any_of(SCEVPreds,
10342 [N](const SCEVPredicate *I) { return I->implies(N); });
10343 }
10344
10345 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
10346
10347 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
10348 for (auto Pred : Preds)
10349 Pred->print(OS, Depth);
10350 }
10351
10352 void SCEVUnionPredicate::add(const SCEVPredicate *N) {
10353 if (const auto *Set = dyn_cast<const SCEVUnionPredicate>(N)) {
10354 for (auto Pred : Set->Preds)
10355 add(Pred);
10356 return;
10357 }
10358
10359 if (implies(N))
10360 return;
10361
10362 const SCEV *Key = N->getExpr();
10363 assert(Key && "Only SCEVUnionPredicate doesn't have an "
10364 "associated expression!");
10365
10366 SCEVToPreds[Key].push_back(N);
10367 Preds.push_back(N);
10368 }
10369
10370 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
10371 Loop &L)
10372 : SE(SE), L(L), Generation(0), BackedgeCount(nullptr) {}
10373
10374 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
10375 const SCEV *Expr = SE.getSCEV(V);
10376 RewriteEntry &Entry = RewriteMap[Expr];
10377
10378 // If we already have an entry and the version matches, return it.
10379 if (Entry.second && Generation == Entry.first)
10380 return Entry.second;
10381
10382 // We found an entry but it's stale. Rewrite the stale entry
10383 // according to the current predicate.
10384 if (Entry.second) 10385 Expr = Entry.second; 10386 10387 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 10388 Entry = {Generation, NewSCEV}; 10389 10390 return NewSCEV; 10391 } 10392 10393 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 10394 if (!BackedgeCount) { 10395 SCEVUnionPredicate BackedgePred; 10396 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 10397 addPredicate(BackedgePred); 10398 } 10399 return BackedgeCount; 10400 } 10401 10402 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 10403 if (Preds.implies(&Pred)) 10404 return; 10405 Preds.add(&Pred); 10406 updateGeneration(); 10407 } 10408 10409 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 10410 return Preds; 10411 } 10412 10413 void PredicatedScalarEvolution::updateGeneration() { 10414 // If the generation number wrapped recompute everything. 10415 if (++Generation == 0) { 10416 for (auto &II : RewriteMap) { 10417 const SCEV *Rewritten = II.second.second; 10418 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 10419 } 10420 } 10421 } 10422 10423 void PredicatedScalarEvolution::setNoOverflow( 10424 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10425 const SCEV *Expr = getSCEV(V); 10426 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10427 10428 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 10429 10430 // Clear the statically implied flags. 10431 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 10432 addPredicate(*SE.getWrapPredicate(AR, Flags)); 10433 10434 auto II = FlagsMap.insert({V, Flags}); 10435 if (!II.second) 10436 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 10437 } 10438 10439 bool PredicatedScalarEvolution::hasNoOverflow( 10440 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 10441 const SCEV *Expr = getSCEV(V); 10442 const auto *AR = cast<SCEVAddRecExpr>(Expr); 10443 10444 Flags = SCEVWrapPredicate::clearFlags( 10445 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 10446 10447 auto II = FlagsMap.find(V); 10448 10449 if (II != FlagsMap.end()) 10450 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 10451 10452 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 10453 } 10454 10455 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 10456 const SCEV *Expr = this->getSCEV(V); 10457 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, Preds); 10458 10459 if (!New) 10460 return nullptr; 10461 10462 updateGeneration(); 10463 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 10464 return New; 10465 } 10466 10467 PredicatedScalarEvolution::PredicatedScalarEvolution( 10468 const PredicatedScalarEvolution &Init) 10469 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 10470 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 10471 for (auto I = Init.FlagsMap.begin(), E = Init.FlagsMap.end(); I != E; ++I) 10472 FlagsMap.insert(*I); 10473 } 10474 10475 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 10476 // For each block. 10477 for (auto *BB : L.getBlocks()) 10478 for (auto &I : *BB) { 10479 if (!SE.isSCEVable(I.getType())) 10480 continue; 10481 10482 auto *Expr = SE.getSCEV(&I); 10483 auto II = RewriteMap.find(Expr); 10484 10485 if (II == RewriteMap.end()) 10486 continue; 10487 10488 // Don't print things that are not interesting. 
10489 if (II->second.second == Expr) 10490 continue; 10491 10492 OS.indent(Depth) << "[PSE]" << I << ":\n"; 10493 OS.indent(Depth + 2) << *Expr << "\n"; 10494 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 10495 } 10496 } 10497
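// Illustrative usage sketch (not part of this file's interface): a loop
// transformation that can emit runtime checks would typically drive the
// predicated interface above along these lines, where L and Ptr stand for a
// hypothetical loop and pointer value:
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // getAsAddRec may have added SCEV predicates to PSE.
//     const SCEV *BTC = PSE.getBackedgeTakenCount();
//     // Before relying on AR or BTC, materialize runtime checks for
//     // PSE.getUnionPredicate().
//   }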