//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
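//
// For example, given a loop
//
//   for (i = 0; i != n; ++i)
//
// the PHI node for i is represented by the add recurrence {0,+,1}<%loop>,
// read as "starts at 0 and steps by 1 on each iteration of %loop".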
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));
153 "derived loop"), 154 cl::init(100)); 155 156 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 157 static cl::opt<bool> VerifySCEV( 158 "verify-scev", cl::Hidden, 159 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 160 static cl::opt<bool> 161 VerifySCEVMap("verify-scev-maps", cl::Hidden, 162 cl::desc("Verify no dangling value in ScalarEvolution's " 163 "ExprValueMap (slow)")); 164 165 static cl::opt<bool> VerifyIR( 166 "scev-verify-ir", cl::Hidden, 167 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 168 cl::init(false)); 169 170 static cl::opt<unsigned> MulOpsInlineThreshold( 171 "scev-mulops-inline-threshold", cl::Hidden, 172 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 173 cl::init(32)); 174 175 static cl::opt<unsigned> AddOpsInlineThreshold( 176 "scev-addops-inline-threshold", cl::Hidden, 177 cl::desc("Threshold for inlining addition operands into a SCEV"), 178 cl::init(500)); 179 180 static cl::opt<unsigned> MaxSCEVCompareDepth( 181 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 182 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 183 cl::init(32)); 184 185 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 186 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 188 cl::init(2)); 189 190 static cl::opt<unsigned> MaxValueCompareDepth( 191 "scalar-evolution-max-value-compare-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive value complexity comparisons"), 193 cl::init(2)); 194 195 static cl::opt<unsigned> 196 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive arithmetics"), 198 cl::init(32)); 199 200 static cl::opt<unsigned> MaxConstantEvolvingDepth( 201 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 202 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 203 204 static cl::opt<unsigned> 205 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 206 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 207 cl::init(8)); 208 209 static cl::opt<unsigned> 210 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 211 cl::desc("Max coefficients in AddRec during evolving"), 212 cl::init(8)); 213 214 static cl::opt<unsigned> 215 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 216 cl::desc("Size of the expression which is considered huge"), 217 cl::init(4096)); 218 219 //===----------------------------------------------------------------------===// 220 // SCEV class definitions 221 //===----------------------------------------------------------------------===// 222 223 //===----------------------------------------------------------------------===// 224 // Implementation of the SCEV class. 

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
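// For reference: the three predicates above recognize the canonical constant
// expressions built by ConstantExpr::getSizeOf, getAlignOf and getOffsetOf.
// For instance, sizeof(T) is encoded as "ptrtoint(getelementptr(T* null, 1))":
// the offset of element 1 from a null T* equals the size of T.  alignof(T)
// likewise uses the offset of the T field within an unpacked {i1, T} struct.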

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are used
    // by one SCEV, so we can safely sort recs by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
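///
/// For example, an operand list (%x, 2, %x) is regrouped as (2, %x, %x): the
/// constant has the lowest getSCEVType() and so sorts first, and the duplicate
/// %x values end up adjacent.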
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
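  // For example, dividing (4 * %x * %y) by (2 * %y) produces the quotient
  // (2 * %x) and a zero remainder, while dividing (4 * %x) by 3 gives up:
  // the quotient is 0 and the remainder is the whole numerator.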
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case: N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
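  //
  // As a concrete illustration for K = 3 and W = 32: K! = 6 = 2^1 * 3, so
  // T = 1 and the odd part of K! is 3.  The product It*(It-1)*(It-2) is
  // computed at width W + T = 33 bits, divided by 2^1 (a right shift),
  // truncated to 32 bits, and finally multiplied by the multiplicative
  // inverse of 3 modulo 2^32, which performs the exact division by 3.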

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
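///
/// For example, evaluating {0,+,1,+,2} at iteration It gives
/// 0*BC(It,0) + 1*BC(It,1) + 2*BC(It,2) = It + It*(It-1) = It^2.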
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that the recursion, via its simplifications, inserted an
    // expression with this ID into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}
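// For example, for an i8 recurrence with a Step known to be exactly 1, the
// limit is -128 - 1 == 127 (mod 2^8) with predicate SLT: while the recurrence
// stays signed-less-than 127, adding 1 cannot signed-overflow.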

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
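//
// For instance, if {1,+,4} is known <nuw>, then for AR == {5,+,4} (start
// 1 + 4, i.e. PreStart + Step) we can rewrite zext(AR) in a wider type as
// {zext(1) + 4,+,4}, subject to the checks performed below.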
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
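// For example, with ConstantStart == 13 (0b1101) and a Step with three
// trailing zero bits (say 8), D == 5: {13,+,8} == 5 + {8,+,8}.  Every value
// of {8,+,8} is a multiple of 8, and adding 5 < 8 to a multiple of 8 cannot
// carry across a multiple-of-8 boundary, so the top-level addition wraps
// neither signed nor unsigned.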
1609 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1610 const APInt &ConstantStart, 1611 const SCEV *Step) { 1612 const unsigned BitWidth = ConstantStart.getBitWidth(); 1613 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1614 if (TZ) 1615 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1616 : ConstantStart; 1617 return APInt(BitWidth, 0); 1618 } 1619 1620 const SCEV * 1621 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1622 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1623 "This is not an extending conversion!"); 1624 assert(isSCEVable(Ty) && 1625 "This is not a conversion to a SCEVable type!"); 1626 Ty = getEffectiveSCEVType(Ty); 1627 1628 // Fold if the operand is constant. 1629 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1630 return getConstant( 1631 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1632 1633 // zext(zext(x)) --> zext(x) 1634 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1635 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1636 1637 // Before doing any expensive analysis, check to see if we've already 1638 // computed a SCEV for this Op and Ty. 1639 FoldingSetNodeID ID; 1640 ID.AddInteger(scZeroExtend); 1641 ID.AddPointer(Op); 1642 ID.AddPointer(Ty); 1643 void *IP = nullptr; 1644 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1645 if (Depth > MaxCastDepth) { 1646 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1647 Op, Ty); 1648 UniqueSCEVs.InsertNode(S, IP); 1649 addToLoopUseLists(S); 1650 return S; 1651 } 1652 1653 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1654 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1655 // It's possible the bits taken off by the truncate were all zero bits. If 1656 // so, we should be able to simplify this further. 1657 const SCEV *X = ST->getOperand(); 1658 ConstantRange CR = getUnsignedRange(X); 1659 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1660 unsigned NewBits = getTypeSizeInBits(Ty); 1661 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1662 CR.zextOrTrunc(NewBits))) 1663 return getTruncateOrZeroExtend(X, Ty, Depth); 1664 } 1665 1666 // If the input value is a chrec scev, and we can prove that the value 1667 // did not overflow the old, smaller, value, we can zero extend all of the 1668 // operands (often constants). This allows analysis of something like 1669 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1670 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1671 if (AR->isAffine()) { 1672 const SCEV *Start = AR->getStart(); 1673 const SCEV *Step = AR->getStepRecurrence(*this); 1674 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1675 const Loop *L = AR->getLoop(); 1676 1677 if (!AR->hasNoUnsignedWrap()) { 1678 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1679 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 1680 } 1681 1682 // If we have special knowledge that this addrec won't overflow, 1683 // we don't need to do any further analysis. 1684 if (AR->hasNoUnsignedWrap()) 1685 return getAddRecExpr( 1686 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1687 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1688 1689 // Check whether the backedge-taken count is SCEVCouldNotCompute. 
1690 // Note that this serves two purposes: It filters out loops that are 1691 // simply not analyzable, and it covers the case where this code is 1692 // being called from within backedge-taken count analysis, such that 1693 // attempting to ask for the backedge-taken count would likely result 1694 // in infinite recursion. In the latter case, the analysis code will 1695 // cope with a conservative value, and it will take care to purge 1696 // that value once it has finished. 1697 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 1698 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1699 // Manually compute the final value for AR, checking for 1700 // overflow. 1701 1702 // Check whether the backedge-taken count can be losslessly cast to 1703 // the addrec's type. The count is always unsigned. 1704 const SCEV *CastedMaxBECount = 1705 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1706 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1707 CastedMaxBECount, MaxBECount->getType(), Depth); 1708 if (MaxBECount == RecastedMaxBECount) { 1709 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1710 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1711 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1712 SCEV::FlagAnyWrap, Depth + 1); 1713 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1714 SCEV::FlagAnyWrap, 1715 Depth + 1), 1716 WideTy, Depth + 1); 1717 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1718 const SCEV *WideMaxBECount = 1719 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1720 const SCEV *OperandExtendedAdd = 1721 getAddExpr(WideStart, 1722 getMulExpr(WideMaxBECount, 1723 getZeroExtendExpr(Step, WideTy, Depth + 1), 1724 SCEV::FlagAnyWrap, Depth + 1), 1725 SCEV::FlagAnyWrap, Depth + 1); 1726 if (ZAdd == OperandExtendedAdd) { 1727 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1728 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1729 // Return the expression with the addrec on the outside. 1730 return getAddRecExpr( 1731 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1732 Depth + 1), 1733 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1734 AR->getNoWrapFlags()); 1735 } 1736 // Similar to above, only this time treat the step value as signed. 1737 // This covers loops that count down. 1738 OperandExtendedAdd = 1739 getAddExpr(WideStart, 1740 getMulExpr(WideMaxBECount, 1741 getSignExtendExpr(Step, WideTy, Depth + 1), 1742 SCEV::FlagAnyWrap, Depth + 1), 1743 SCEV::FlagAnyWrap, Depth + 1); 1744 if (ZAdd == OperandExtendedAdd) { 1745 // Cache knowledge of AR NW, which is propagated to this AddRec. 1746 // Negative step causes unsigned wrap, but it still can't self-wrap. 1747 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1748 // Return the expression with the addrec on the outside. 1749 return getAddRecExpr( 1750 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1751 Depth + 1), 1752 getSignExtendExpr(Step, Ty, Depth + 1), L, 1753 AR->getNoWrapFlags()); 1754 } 1755 } 1756 } 1757 1758 // Normally, in the cases we can prove no-overflow via a 1759 // backedge guarding condition, we can also compute a backedge 1760 // taken count for the loop. The exceptions are assumptions and 1761 // guards present in the loop -- SCEV is not great at exploiting 1762 // these to compute max backedge taken counts, but can still use 1763 // these to prove lack of overflow. Use this fact to avoid 1764 // doing extra work that may not pay off.
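// As a concrete instance of the guard-based proof below: for an i8 addrec
// {0,+,4}, N = APInt::getMinValue(8) - umax(Step) = 0 - 4 = 252, and a
// backedge guarded by `AR ult 252` means the pre-increment value is at most
// 251 whenever the backedge is taken, so AR + 4 <= 255 and the increment
// cannot unsigned-wrap.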
1765 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1766 !AC.assumptions().empty()) { 1767 // If the backedge is guarded by a comparison with the pre-inc 1768 // value the addrec is safe. Also, if the entry is guarded by 1769 // a comparison with the start value and the backedge is 1770 // guarded by a comparison with the post-inc value, the addrec 1771 // is safe. 1772 if (isKnownPositive(Step)) { 1773 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1774 getUnsignedRangeMax(Step)); 1775 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1776 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1777 // Cache knowledge of AR NUW, which is propagated to this 1778 // AddRec. 1779 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1780 // Return the expression with the addrec on the outside. 1781 return getAddRecExpr( 1782 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1783 Depth + 1), 1784 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1785 AR->getNoWrapFlags()); 1786 } 1787 } else if (isKnownNegative(Step)) { 1788 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1789 getSignedRangeMin(Step)); 1790 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1791 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1792 // Cache knowledge of AR NW, which is propagated to this 1793 // AddRec. Negative step causes unsigned wrap, but it 1794 // still can't self-wrap. 1795 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1796 // Return the expression with the addrec on the outside. 1797 return getAddRecExpr( 1798 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1799 Depth + 1), 1800 getSignExtendExpr(Step, Ty, Depth + 1), L, 1801 AR->getNoWrapFlags()); 1802 } 1803 } 1804 } 1805 1806 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1807 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1808 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1809 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1810 const APInt &C = SC->getAPInt(); 1811 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1812 if (D != 0) { 1813 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1814 const SCEV *SResidual = 1815 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1816 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1817 return getAddExpr(SZExtD, SZExtR, 1818 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1819 Depth + 1); 1820 } 1821 } 1822 1823 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1824 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1825 return getAddRecExpr( 1826 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1827 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1828 } 1829 } 1830 1831 // zext(A % B) --> zext(A) % zext(B) 1832 { 1833 const SCEV *LHS; 1834 const SCEV *RHS; 1835 if (matchURem(Op, LHS, RHS)) 1836 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1837 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1838 } 1839 1840 // zext(A / B) --> zext(A) / zext(B). 
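// This holds because zext preserves the exact unsigned value of each
// operand and udiv depends only on those values, so dividing before or
// after widening yields the same result.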
1841 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1842 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1843 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1844 1845 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1846 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1847 if (SA->hasNoUnsignedWrap()) { 1848 // If the addition does not unsign overflow then we can, by definition, 1849 // commute the zero extension with the addition operation. 1850 SmallVector<const SCEV *, 4> Ops; 1851 for (const auto *Op : SA->operands()) 1852 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1853 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1854 } 1855 1856 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1857 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1858 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1859 // 1860 // Often address arithmetic contains expressions like 1861 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1862 // This transformation is useful while proving that such expressions are 1863 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 1864 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1865 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1866 if (D != 0) { 1867 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1868 const SCEV *SResidual = 1869 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1870 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1871 return getAddExpr(SZExtD, SZExtR, 1872 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1873 Depth + 1); 1874 } 1875 } 1876 } 1877 1878 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1879 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1880 if (SM->hasNoUnsignedWrap()) { 1881 // If the multiply does not unsign overflow then we can, by definition, 1882 // commute the zero extension with the multiply operation. 1883 SmallVector<const SCEV *, 4> Ops; 1884 for (const auto *Op : SM->operands()) 1885 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1886 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1887 } 1888 1889 // zext(2^K * (trunc X to iN)) to iM -> 1890 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1891 // 1892 // Proof: 1893 // 1894 // zext(2^K * (trunc X to iN)) to iM 1895 // = zext((trunc X to iN) << K) to iM 1896 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1897 // (because shl removes the top K bits) 1898 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1899 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1900 // 1901 if (SM->getNumOperands() == 2) 1902 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1903 if (MulLHS->getAPInt().isPowerOf2()) 1904 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1905 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1906 MulLHS->getAPInt().logBase2(); 1907 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1908 return getMulExpr( 1909 getZeroExtendExpr(MulLHS, Ty), 1910 getZeroExtendExpr( 1911 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1912 SCEV::FlagNUW, Depth + 1); 1913 } 1914 } 1915 1916 // The cast wasn't folded; create an explicit cast node. 1917 // Recompute the insert position, as it may have been invalidated.
1918 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1919 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1920 Op, Ty); 1921 UniqueSCEVs.InsertNode(S, IP); 1922 addToLoopUseLists(S); 1923 return S; 1924 } 1925 1926 const SCEV * 1927 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1928 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1929 "This is not an extending conversion!"); 1930 assert(isSCEVable(Ty) && 1931 "This is not a conversion to a SCEVable type!"); 1932 Ty = getEffectiveSCEVType(Ty); 1933 1934 // Fold if the operand is constant. 1935 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1936 return getConstant( 1937 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1938 1939 // sext(sext(x)) --> sext(x) 1940 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1941 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1942 1943 // sext(zext(x)) --> zext(x) 1944 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1945 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1946 1947 // Before doing any expensive analysis, check to see if we've already 1948 // computed a SCEV for this Op and Ty. 1949 FoldingSetNodeID ID; 1950 ID.AddInteger(scSignExtend); 1951 ID.AddPointer(Op); 1952 ID.AddPointer(Ty); 1953 void *IP = nullptr; 1954 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1955 // Limit recursion depth. 1956 if (Depth > MaxCastDepth) { 1957 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1958 Op, Ty); 1959 UniqueSCEVs.InsertNode(S, IP); 1960 addToLoopUseLists(S); 1961 return S; 1962 } 1963 1964 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1965 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1966 // It's possible the bits taken off by the truncate were all sign bits. If 1967 // so, we should be able to simplify this further. 1968 const SCEV *X = ST->getOperand(); 1969 ConstantRange CR = getSignedRange(X); 1970 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1971 unsigned NewBits = getTypeSizeInBits(Ty); 1972 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1973 CR.sextOrTrunc(NewBits))) 1974 return getTruncateOrSignExtend(X, Ty, Depth); 1975 } 1976 1977 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1978 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1979 if (SA->hasNoSignedWrap()) { 1980 // If the addition does not sign overflow then we can, by definition, 1981 // commute the sign extension with the addition operation. 1982 SmallVector<const SCEV *, 4> Ops; 1983 for (const auto *Op : SA->operands()) 1984 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1985 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1986 } 1987 1988 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1989 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1990 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 
1991 // 1992 // For instance, this will bring two seemingly different expressions: 1993 // 1 + sext(5 + 20 * %x + 24 * %y) and 1994 // sext(6 + 20 * %x + 24 * %y) 1995 // to the same form: 1996 // 2 + sext(4 + 20 * %x + 24 * %y) 1997 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1998 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1999 if (D != 0) { 2000 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2001 const SCEV *SResidual = 2002 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 2003 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2004 return getAddExpr(SSExtD, SSExtR, 2005 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2006 Depth + 1); 2007 } 2008 } 2009 } 2010 // If the input value is a chrec scev, and we can prove that the value 2011 // did not overflow the old, smaller, value, we can sign extend all of the 2012 // operands (often constants). This allows analysis of something like 2013 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 2014 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 2015 if (AR->isAffine()) { 2016 const SCEV *Start = AR->getStart(); 2017 const SCEV *Step = AR->getStepRecurrence(*this); 2018 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 2019 const Loop *L = AR->getLoop(); 2020 2021 if (!AR->hasNoSignedWrap()) { 2022 auto NewFlags = proveNoWrapViaConstantRanges(AR); 2023 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 2024 } 2025 2026 // If we have special knowledge that this addrec won't overflow, 2027 // we don't need to do any further analysis. 2028 if (AR->hasNoSignedWrap()) 2029 return getAddRecExpr( 2030 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2031 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 2032 2033 // Check whether the backedge-taken count is SCEVCouldNotCompute. 2034 // Note that this serves two purposes: It filters out loops that are 2035 // simply not analyzable, and it covers the case where this code is 2036 // being called from within backedge-taken count analysis, such that 2037 // attempting to ask for the backedge-taken count would likely result 2038 // in infinite recursion. In the latter case, the analysis code will 2039 // cope with a conservative value, and it will take care to purge 2040 // that value once it has finished. 2041 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L); 2042 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 2043 // Manually compute the final value for AR, checking for 2044 // overflow. 2045 2046 // Check whether the backedge-taken count can be losslessly cast to 2047 // the addrec's type. The count is always unsigned. 2048 const SCEV *CastedMaxBECount = 2049 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 2050 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 2051 CastedMaxBECount, MaxBECount->getType(), Depth); 2052 if (MaxBECount == RecastedMaxBECount) { 2053 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 2054 // Check whether Start+Step*MaxBECount has no signed overflow.
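// The proof below works in a type twice as wide: the narrow sum
// Start + Step * MaxBECount is sign-extended and compared against the same
// sum assembled from individually extended operands. The two wide values
// are equal precisely when the narrow computation cannot overflow in the
// signed sense, so a structural SCEV match is a sufficient proof.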
2055 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 2056 SCEV::FlagAnyWrap, Depth + 1); 2057 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 2058 SCEV::FlagAnyWrap, 2059 Depth + 1), 2060 WideTy, Depth + 1); 2061 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 2062 const SCEV *WideMaxBECount = 2063 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 2064 const SCEV *OperandExtendedAdd = 2065 getAddExpr(WideStart, 2066 getMulExpr(WideMaxBECount, 2067 getSignExtendExpr(Step, WideTy, Depth + 1), 2068 SCEV::FlagAnyWrap, Depth + 1), 2069 SCEV::FlagAnyWrap, Depth + 1); 2070 if (SAdd == OperandExtendedAdd) { 2071 // Cache knowledge of AR NSW, which is propagated to this AddRec. 2072 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2073 // Return the expression with the addrec on the outside. 2074 return getAddRecExpr( 2075 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2076 Depth + 1), 2077 getSignExtendExpr(Step, Ty, Depth + 1), L, 2078 AR->getNoWrapFlags()); 2079 } 2080 // Similar to above, only this time treat the step value as unsigned. 2081 // This covers loops that count up with an unsigned step. 2082 OperandExtendedAdd = 2083 getAddExpr(WideStart, 2084 getMulExpr(WideMaxBECount, 2085 getZeroExtendExpr(Step, WideTy, Depth + 1), 2086 SCEV::FlagAnyWrap, Depth + 1), 2087 SCEV::FlagAnyWrap, Depth + 1); 2088 if (SAdd == OperandExtendedAdd) { 2089 // If AR wraps around then 2090 // 2091 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 2092 // => SAdd != OperandExtendedAdd 2093 // 2094 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 2095 // (SAdd == OperandExtendedAdd => AR is NW) 2096 2097 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 2098 2099 // Return the expression with the addrec on the outside. 2100 return getAddRecExpr( 2101 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2102 Depth + 1), 2103 getZeroExtendExpr(Step, Ty, Depth + 1), L, 2104 AR->getNoWrapFlags()); 2105 } 2106 } 2107 } 2108 2109 // Normally, in the cases we can prove no-overflow via a 2110 // backedge guarding condition, we can also compute a backedge 2111 // taken count for the loop. The exceptions are assumptions and 2112 // guards present in the loop -- SCEV is not great at exploiting 2113 // these to compute max backedge taken counts, but can still use 2114 // these to prove lack of overflow. Use this fact to avoid 2115 // doing extra work that may not pay off. 2116 2117 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 2118 !AC.assumptions().empty()) { 2119 // If the backedge is guarded by a comparison with the pre-inc 2120 // value the addrec is safe. Also, if the entry is guarded by 2121 // a comparison with the start value and the backedge is 2122 // guarded by a comparison with the post-inc value, the addrec 2123 // is safe. 2124 ICmpInst::Predicate Pred; 2125 const SCEV *OverflowLimit = 2126 getSignedOverflowLimitForStep(Step, &Pred, this); 2127 if (OverflowLimit && 2128 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 2129 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 2130 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec. 
2131 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2132 return getAddRecExpr( 2133 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2134 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2135 } 2136 } 2137 2138 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 2139 // if D + (C - D + Step * n) could be proven to not signed wrap 2140 // where D maximizes the number of trailing zeros of (C - D + Step * n) 2141 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 2142 const APInt &C = SC->getAPInt(); 2143 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 2144 if (D != 0) { 2145 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2146 const SCEV *SResidual = 2147 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 2148 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2149 return getAddExpr(SSExtD, SSExtR, 2150 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2151 Depth + 1); 2152 } 2153 } 2154 2155 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2156 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW); 2157 return getAddRecExpr( 2158 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2159 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2160 } 2161 } 2162 2163 // If the input value is provably positive and we could not simplify 2164 // away the sext build a zext instead. 2165 if (isKnownNonNegative(Op)) 2166 return getZeroExtendExpr(Op, Ty, Depth + 1); 2167 2168 // The cast wasn't folded; create an explicit cast node. 2169 // Recompute the insert position, as it may have been invalidated. 2170 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2171 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2172 Op, Ty); 2173 UniqueSCEVs.InsertNode(S, IP); 2174 addToLoopUseLists(S); 2175 return S; 2176 } 2177 2178 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2179 /// unspecified bits out to the given type. 2180 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2181 Type *Ty) { 2182 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2183 "This is not an extending conversion!"); 2184 assert(isSCEVable(Ty) && 2185 "This is not a conversion to a SCEVable type!"); 2186 Ty = getEffectiveSCEVType(Ty); 2187 2188 // Sign-extend negative constants. 2189 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2190 if (SC->getAPInt().isNegative()) 2191 return getSignExtendExpr(Op, Ty); 2192 2193 // Peel off a truncate cast. 2194 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2195 const SCEV *NewOp = T->getOperand(); 2196 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2197 return getAnyExtendExpr(NewOp, Ty); 2198 return getTruncateOrNoop(NewOp, Ty); 2199 } 2200 2201 // Next try a zext cast. If the cast is folded, use it. 2202 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2203 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2204 return ZExt; 2205 2206 // Next try a sext cast. If the cast is folded, use it. 2207 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2208 if (!isa<SCEVSignExtendExpr>(SExt)) 2209 return SExt; 2210 2211 // Force the cast to be folded into the operands of an addrec. 
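// For instance, anyext({S,+,X}) is rebuilt below as {anyext(S),+,anyext(X)},
// keeping only the no-self-wrap flag (SCEV::FlagNW): an any-extend leaves
// the new high bits unspecified, so NUW/NSW can no longer be asserted.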
2212 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2213 SmallVector<const SCEV *, 4> Ops; 2214 for (const SCEV *Op : AR->operands()) 2215 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2216 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2217 } 2218 2219 // If the expression is obviously signed, use the sext cast value. 2220 if (isa<SCEVSMaxExpr>(Op)) 2221 return SExt; 2222 2223 // Absent any other information, use the zext cast value. 2224 return ZExt; 2225 } 2226 2227 /// Process the given Ops list, which is a list of operands to be added under 2228 /// the given scale, update the given map. This is a helper function for 2229 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2230 /// that would form an add expression like this: 2231 /// 2232 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2233 /// 2234 /// where A and B are constants, update the map with these values: 2235 /// 2236 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2237 /// 2238 /// and add 13 + A*B*29 to AccumulatedConstant. 2239 /// This will allow getAddRecExpr to produce this: 2240 /// 2241 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2242 /// 2243 /// This form often exposes folding opportunities that are hidden in 2244 /// the original operand list. 2245 /// 2246 /// Return true iff it appears that any interesting folding opportunities 2247 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2248 /// the common case where no interesting opportunities are present, and 2249 /// is also used as a check to avoid infinite recursion. 2250 static bool 2251 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2252 SmallVectorImpl<const SCEV *> &NewOps, 2253 APInt &AccumulatedConstant, 2254 const SCEV *const *Ops, size_t NumOperands, 2255 const APInt &Scale, 2256 ScalarEvolution &SE) { 2257 bool Interesting = false; 2258 2259 // Iterate over the add operands. They are sorted, with constants first. 2260 unsigned i = 0; 2261 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2262 ++i; 2263 // Pull a buried constant out to the outside. 2264 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2265 Interesting = true; 2266 AccumulatedConstant += Scale * C->getAPInt(); 2267 } 2268 2269 // Next comes everything else. We're especially interested in multiplies 2270 // here, but they're in the middle, so just visit the rest with one loop. 2271 for (; i != NumOperands; ++i) { 2272 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2273 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2274 APInt NewScale = 2275 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2276 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2277 // A multiplication of a constant with another add; recurse. 2278 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2279 Interesting |= 2280 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2281 Add->op_begin(), Add->getNumOperands(), 2282 NewScale, SE); 2283 } else { 2284 // A multiplication of a constant with some other value. Update 2285 // the map. 
2286 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2287 const SCEV *Key = SE.getMulExpr(MulOps); 2288 auto Pair = M.insert({Key, NewScale}); 2289 if (Pair.second) { 2290 NewOps.push_back(Pair.first->first); 2291 } else { 2292 Pair.first->second += NewScale; 2293 // The map already had an entry for this value, which may indicate 2294 // a folding opportunity. 2295 Interesting = true; 2296 } 2297 } 2298 } else { 2299 // An ordinary operand. Update the map. 2300 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2301 M.insert({Ops[i], Scale}); 2302 if (Pair.second) { 2303 NewOps.push_back(Pair.first->first); 2304 } else { 2305 Pair.first->second += Scale; 2306 // The map already had an entry for this value, which may indicate 2307 // a folding opportunity. 2308 Interesting = true; 2309 } 2310 } 2311 } 2312 2313 return Interesting; 2314 } 2315 2316 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2317 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2318 // can't-overflow flags for the operation if possible. 2319 static SCEV::NoWrapFlags 2320 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2321 const ArrayRef<const SCEV *> Ops, 2322 SCEV::NoWrapFlags Flags) { 2323 using namespace std::placeholders; 2324 2325 using OBO = OverflowingBinaryOperator; 2326 2327 bool CanAnalyze = 2328 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2329 (void)CanAnalyze; 2330 assert(CanAnalyze && "don't call from other places!"); 2331 2332 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2333 SCEV::NoWrapFlags SignOrUnsignWrap = 2334 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2335 2336 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2337 auto IsKnownNonNegative = [&](const SCEV *S) { 2338 return SE->isKnownNonNegative(S); 2339 }; 2340 2341 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2342 Flags = 2343 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2344 2345 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2346 2347 if (SignOrUnsignWrap != SignOrUnsignMask && 2348 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2349 isa<SCEVConstant>(Ops[0])) { 2350 2351 auto Opcode = [&] { 2352 switch (Type) { 2353 case scAddExpr: 2354 return Instruction::Add; 2355 case scMulExpr: 2356 return Instruction::Mul; 2357 default: 2358 llvm_unreachable("Unexpected SCEV op."); 2359 } 2360 }(); 2361 2362 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2363 2364 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2365 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2366 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2367 Opcode, C, OBO::NoSignedWrap); 2368 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2369 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2370 } 2371 2372 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 
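// For example, with Opcode == Add and C == 10 on i8, the guaranteed
// no-unsigned-wrap region is [0, 246): if the unsigned range of Ops[1]
// lies entirely inside it, the addition cannot wrap and FlagNUW may be set.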
2373 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2374 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2375 Opcode, C, OBO::NoUnsignedWrap); 2376 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2377 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2378 } 2379 } 2380 2381 return Flags; 2382 } 2383 2384 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2385 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2386 } 2387 2388 /// Get a canonical add expression, or something simpler if possible. 2389 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2390 SCEV::NoWrapFlags Flags, 2391 unsigned Depth) { 2392 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2393 "only nuw or nsw allowed"); 2394 assert(!Ops.empty() && "Cannot get empty add!"); 2395 if (Ops.size() == 1) return Ops[0]; 2396 #ifndef NDEBUG 2397 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2398 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2399 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2400 "SCEVAddExpr operand types don't match!"); 2401 #endif 2402 2403 // Sort by complexity; this groups all similar expression types together. 2404 GroupByComplexity(Ops, &LI, DT); 2405 2406 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2407 2408 // If there are any constants, fold them together. 2409 unsigned Idx = 0; 2410 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2411 ++Idx; 2412 assert(Idx < Ops.size()); 2413 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2414 // We found two constants, fold them together! 2415 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2416 if (Ops.size() == 2) return Ops[0]; 2417 Ops.erase(Ops.begin()+1); // Erase the folded element 2418 LHSC = cast<SCEVConstant>(Ops[0]); 2419 } 2420 2421 // If we are left with a constant zero being added, strip it off. 2422 if (LHSC->getValue()->isZero()) { 2423 Ops.erase(Ops.begin()); 2424 --Idx; 2425 } 2426 2427 if (Ops.size() == 1) return Ops[0]; 2428 } 2429 2430 // Limit recursion depth. 2431 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2432 return getOrCreateAddExpr(Ops, Flags); 2433 2434 // Okay, check to see if the same value occurs in the operand list more than 2435 // once. If so, merge them together into a multiply expression. Since we 2436 // sorted the list, these values are required to be adjacent. 2437 Type *Ty = Ops[0]->getType(); 2438 bool FoundMatch = false; 2439 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2440 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2441 // Scan ahead to count how many equal operands there are. 2442 unsigned Count = 2; 2443 while (i+Count != e && Ops[i+Count] == Ops[i]) 2444 ++Count; 2445 // Merge the values into a multiply. 2446 const SCEV *Scale = getConstant(Ty, Count); 2447 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2448 if (Ops.size() == Count) 2449 return Mul; 2450 Ops[i] = Mul; 2451 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2452 --i; e -= Count - 1; 2453 FoundMatch = true; 2454 } 2455 if (FoundMatch) 2456 return getAddExpr(Ops, Flags, Depth + 1); 2457 2458 // Check for truncates. If all the operands are truncated from the same 2459 // type, see if factoring out the truncate would permit the result to be 2460 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y) 2461 // if the contents of the resulting outer trunc fold to something simple.
2462 auto FindTruncSrcType = [&]() -> Type * { 2463 // We're ultimately looking to fold an addrec of truncs and muls of only 2464 // constants and truncs, so if we find any other types of SCEV 2465 // as operands of the addrec then we bail and return nullptr here. 2466 // Otherwise, we return the type of the operand of a trunc that we find. 2467 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2468 return T->getOperand()->getType(); 2469 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2470 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2471 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2472 return T->getOperand()->getType(); 2473 } 2474 return nullptr; 2475 }; 2476 if (auto *SrcType = FindTruncSrcType()) { 2477 SmallVector<const SCEV *, 8> LargeOps; 2478 bool Ok = true; 2479 // Check all the operands to see if they can be represented in the 2480 // source type of the truncate. 2481 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2482 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2483 if (T->getOperand()->getType() != SrcType) { 2484 Ok = false; 2485 break; 2486 } 2487 LargeOps.push_back(T->getOperand()); 2488 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2489 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2490 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2491 SmallVector<const SCEV *, 8> LargeMulOps; 2492 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2493 if (const SCEVTruncateExpr *T = 2494 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2495 if (T->getOperand()->getType() != SrcType) { 2496 Ok = false; 2497 break; 2498 } 2499 LargeMulOps.push_back(T->getOperand()); 2500 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2501 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2502 } else { 2503 Ok = false; 2504 break; 2505 } 2506 } 2507 if (Ok) 2508 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2509 } else { 2510 Ok = false; 2511 break; 2512 } 2513 } 2514 if (Ok) { 2515 // Evaluate the expression in the larger type. 2516 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2517 // If it folds to something simple, use it. Otherwise, don't. 2518 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2519 return getTruncateExpr(Fold, Ty); 2520 } 2521 } 2522 2523 // Skip past any other cast SCEVs. 2524 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2525 ++Idx; 2526 2527 // If there are add operands they would be next. 2528 if (Idx < Ops.size()) { 2529 bool DeletedAdd = false; 2530 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2531 if (Ops.size() > AddOpsInlineThreshold || 2532 Add->getNumOperands() > AddOpsInlineThreshold) 2533 break; 2534 // If we have an add, expand the add operands onto the end of the operands 2535 // list. 2536 Ops.erase(Ops.begin()+Idx); 2537 Ops.append(Add->op_begin(), Add->op_end()); 2538 DeletedAdd = true; 2539 } 2540 2541 // If we deleted at least one add, we added operands to the end of the list, 2542 // and they are not necessarily sorted. Recurse to resort and resimplify 2543 // any operands we just acquired. 2544 if (DeletedAdd) 2545 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2546 } 2547 2548 // Skip over the add expression until we get to a multiply. 
2549 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2550 ++Idx; 2551 2552 // Check to see if there are any folding opportunities present with 2553 // operands multiplied by constant values. 2554 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2555 uint64_t BitWidth = getTypeSizeInBits(Ty); 2556 DenseMap<const SCEV *, APInt> M; 2557 SmallVector<const SCEV *, 8> NewOps; 2558 APInt AccumulatedConstant(BitWidth, 0); 2559 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2560 Ops.data(), Ops.size(), 2561 APInt(BitWidth, 1), *this)) { 2562 struct APIntCompare { 2563 bool operator()(const APInt &LHS, const APInt &RHS) const { 2564 return LHS.ult(RHS); 2565 } 2566 }; 2567 2568 // Some interesting folding opportunity is present, so it's worthwhile to 2569 // re-generate the operands list. Group the operands by constant scale, 2570 // to avoid multiplying by the same constant scale multiple times. 2571 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2572 for (const SCEV *NewOp : NewOps) 2573 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2574 // Re-generate the operands list. 2575 Ops.clear(); 2576 if (AccumulatedConstant != 0) 2577 Ops.push_back(getConstant(AccumulatedConstant)); 2578 for (auto &MulOp : MulOpLists) 2579 if (MulOp.first != 0) 2580 Ops.push_back(getMulExpr( 2581 getConstant(MulOp.first), 2582 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2583 SCEV::FlagAnyWrap, Depth + 1)); 2584 if (Ops.empty()) 2585 return getZero(Ty); 2586 if (Ops.size() == 1) 2587 return Ops[0]; 2588 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2589 } 2590 } 2591 2592 // If we are adding something to a multiply expression, make sure the 2593 // something is not already an operand of the multiply. If so, merge it into 2594 // the multiply. 2595 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2596 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2597 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2598 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2599 if (isa<SCEVConstant>(MulOpSCEV)) 2600 continue; 2601 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2602 if (MulOpSCEV == Ops[AddOp]) { 2603 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2604 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2605 if (Mul->getNumOperands() != 2) { 2606 // If the multiply has more than two operands, we must get the 2607 // Y*Z term. 2608 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2609 Mul->op_begin()+MulOp); 2610 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2611 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2612 } 2613 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2614 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2615 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2616 SCEV::FlagAnyWrap, Depth + 1); 2617 if (Ops.size() == 2) return OuterMul; 2618 if (AddOp < Idx) { 2619 Ops.erase(Ops.begin()+AddOp); 2620 Ops.erase(Ops.begin()+Idx-1); 2621 } else { 2622 Ops.erase(Ops.begin()+Idx); 2623 Ops.erase(Ops.begin()+AddOp-1); 2624 } 2625 Ops.push_back(OuterMul); 2626 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2627 } 2628 2629 // Check this multiply against other multiplies being added together.
2630 for (unsigned OtherMulIdx = Idx+1; 2631 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2632 ++OtherMulIdx) { 2633 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2634 // If MulOp occurs in OtherMul, we can fold the two multiplies 2635 // together. 2636 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2637 OMulOp != e; ++OMulOp) 2638 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2639 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2640 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2641 if (Mul->getNumOperands() != 2) { 2642 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2643 Mul->op_begin()+MulOp); 2644 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2645 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2646 } 2647 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2648 if (OtherMul->getNumOperands() != 2) { 2649 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2650 OtherMul->op_begin()+OMulOp); 2651 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2652 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2653 } 2654 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2655 const SCEV *InnerMulSum = 2656 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2657 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2658 SCEV::FlagAnyWrap, Depth + 1); 2659 if (Ops.size() == 2) return OuterMul; 2660 Ops.erase(Ops.begin()+Idx); 2661 Ops.erase(Ops.begin()+OtherMulIdx-1); 2662 Ops.push_back(OuterMul); 2663 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2664 } 2665 } 2666 } 2667 } 2668 2669 // If there are any add recurrences in the operands list, see if any other 2670 // added values are loop invariant. If so, we can fold them into the 2671 // recurrence. 2672 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2673 ++Idx; 2674 2675 // Scan over all recurrences, trying to fold loop invariants into them. 2676 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2677 // Scan all of the other operands to this add and add them to the vector if 2678 // they are loop invariant w.r.t. the recurrence. 2679 SmallVector<const SCEV *, 8> LIOps; 2680 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2681 const Loop *AddRecLoop = AddRec->getLoop(); 2682 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2683 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2684 LIOps.push_back(Ops[i]); 2685 Ops.erase(Ops.begin()+i); 2686 --i; --e; 2687 } 2688 2689 // If we found some loop invariants, fold them into the recurrence. 2690 if (!LIOps.empty()) { 2691 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2692 LIOps.push_back(AddRec->getStart()); 2693 2694 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2695 AddRec->op_end()); 2696 // This follows from the fact that the no-wrap flags on the outer add 2697 // expression are applicable on the 0th iteration, when the add recurrence 2698 // will be equal to its start value. 2699 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2700 2701 // Build the new addrec. Propagate the NUW and NSW flags if both the 2702 // outer add and the inner addrec are guaranteed to have no overflow. 2703 // Always propagate NW. 2704 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2705 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2706 2707 // If all of the other operands were loop invariant, we are done. 
2708 if (Ops.size() == 1) return NewRec; 2709 2710 // Otherwise, add the folded AddRec by the non-invariant parts. 2711 for (unsigned i = 0;; ++i) 2712 if (Ops[i] == AddRec) { 2713 Ops[i] = NewRec; 2714 break; 2715 } 2716 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2717 } 2718 2719 // Okay, if there weren't any loop invariants to be folded, check to see if 2720 // there are multiple AddRec's with the same loop induction variable being 2721 // added together. If so, we can fold them. 2722 for (unsigned OtherIdx = Idx+1; 2723 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2724 ++OtherIdx) { 2725 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2726 // so that the 1st found AddRecExpr is dominated by all others. 2727 assert(DT.dominates( 2728 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2729 AddRec->getLoop()->getHeader()) && 2730 "AddRecExprs are not sorted in reverse dominance order?"); 2731 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2732 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2733 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2734 AddRec->op_end()); 2735 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2736 ++OtherIdx) { 2737 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2738 if (OtherAddRec->getLoop() == AddRecLoop) { 2739 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2740 i != e; ++i) { 2741 if (i >= AddRecOps.size()) { 2742 AddRecOps.append(OtherAddRec->op_begin()+i, 2743 OtherAddRec->op_end()); 2744 break; 2745 } 2746 SmallVector<const SCEV *, 2> TwoOps = { 2747 AddRecOps[i], OtherAddRec->getOperand(i)}; 2748 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2749 } 2750 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2751 } 2752 } 2753 // Step size has changed, so we cannot guarantee no self-wraparound. 2754 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2755 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2756 } 2757 } 2758 2759 // Otherwise couldn't fold anything into this recurrence. Move onto the 2760 // next one. 2761 } 2762 2763 // Okay, it looks like we really DO need an add expr. Check to see if we 2764 // already have one, otherwise create a new one. 
2765 return getOrCreateAddExpr(Ops, Flags); 2766 } 2767 2768 const SCEV * 2769 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2770 SCEV::NoWrapFlags Flags) { 2771 FoldingSetNodeID ID; 2772 ID.AddInteger(scAddExpr); 2773 for (const SCEV *Op : Ops) 2774 ID.AddPointer(Op); 2775 void *IP = nullptr; 2776 SCEVAddExpr *S = 2777 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2778 if (!S) { 2779 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2780 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2781 S = new (SCEVAllocator) 2782 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2783 UniqueSCEVs.InsertNode(S, IP); 2784 addToLoopUseLists(S); 2785 } 2786 S->setNoWrapFlags(Flags); 2787 return S; 2788 } 2789 2790 const SCEV * 2791 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2792 const Loop *L, SCEV::NoWrapFlags Flags) { 2793 FoldingSetNodeID ID; 2794 ID.AddInteger(scAddRecExpr); 2795 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2796 ID.AddPointer(Ops[i]); 2797 ID.AddPointer(L); 2798 void *IP = nullptr; 2799 SCEVAddRecExpr *S = 2800 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2801 if (!S) { 2802 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2803 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2804 S = new (SCEVAllocator) 2805 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2806 UniqueSCEVs.InsertNode(S, IP); 2807 addToLoopUseLists(S); 2808 } 2809 S->setNoWrapFlags(Flags); 2810 return S; 2811 } 2812 2813 const SCEV * 2814 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2815 SCEV::NoWrapFlags Flags) { 2816 FoldingSetNodeID ID; 2817 ID.AddInteger(scMulExpr); 2818 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2819 ID.AddPointer(Ops[i]); 2820 void *IP = nullptr; 2821 SCEVMulExpr *S = 2822 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2823 if (!S) { 2824 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2825 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2826 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2827 O, Ops.size()); 2828 UniqueSCEVs.InsertNode(S, IP); 2829 addToLoopUseLists(S); 2830 } 2831 S->setNoWrapFlags(Flags); 2832 return S; 2833 } 2834 2835 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2836 uint64_t k = i*j; 2837 if (j > 1 && k / j != i) Overflow = true; 2838 return k; 2839 } 2840 2841 /// Compute the result of "n choose k", the binomial coefficient. If an 2842 /// intermediate computation overflows, Overflow will be set and the return will 2843 /// be garbage. Overflow is not cleared on absence of overflow. 2844 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2845 // We use the multiplicative formula: 2846 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2847 // At the i-th iteration, we multiply by the i-th term of the numerator, 2848 // n-(i-1), and divide by i. This division will always produce an 2849 // integral result, and helps reduce the chance of overflow in the 2850 // intermediate computations. However, we can still overflow even when the 2851 // final result would fit.
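// A short trace: Choose(6, 2) computes r = (1 * 6) / 1 = 6 and then
// r = (6 * 5) / 2 = 15, matching C(6,2) = 15; r stays integral after every
// step because C(n,i-1) * (n-i+1) == i * C(n,i).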
2852 2853 if (n == 0 || n == k) return 1; 2854 if (k > n) return 0; 2855 2856 if (k > n/2) 2857 k = n-k; 2858 2859 uint64_t r = 1; 2860 for (uint64_t i = 1; i <= k; ++i) { 2861 r = umul_ov(r, n-(i-1), Overflow); 2862 r /= i; 2863 } 2864 return r; 2865 } 2866 2867 /// Determine if any of the operands in this SCEV are a constant or if 2868 /// any of the add or multiply expressions in this SCEV contain a constant. 2869 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2870 struct FindConstantInAddMulChain { 2871 bool FoundConstant = false; 2872 2873 bool follow(const SCEV *S) { 2874 FoundConstant |= isa<SCEVConstant>(S); 2875 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2876 } 2877 2878 bool isDone() const { 2879 return FoundConstant; 2880 } 2881 }; 2882 2883 FindConstantInAddMulChain F; 2884 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2885 ST.visitAll(StartExpr); 2886 return F.FoundConstant; 2887 } 2888 2889 /// Get a canonical multiply expression, or something simpler if possible. 2890 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2891 SCEV::NoWrapFlags Flags, 2892 unsigned Depth) { 2893 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2894 "only nuw or nsw allowed"); 2895 assert(!Ops.empty() && "Cannot get empty mul!"); 2896 if (Ops.size() == 1) return Ops[0]; 2897 #ifndef NDEBUG 2898 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2899 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2900 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2901 "SCEVMulExpr operand types don't match!"); 2902 #endif 2903 2904 // Sort by complexity; this groups all similar expression types together. 2905 GroupByComplexity(Ops, &LI, DT); 2906 2907 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2908 2909 // Limit recursion depth. 2910 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2911 return getOrCreateMulExpr(Ops, Flags); 2912 2913 // If there are any constants, fold them together. 2914 unsigned Idx = 0; 2915 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2916 2917 if (Ops.size() == 2) 2918 // C1*(C2+V) -> C1*C2 + C1*V 2919 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2920 // If any of Add's ops are Adds or Muls with a constant, apply this 2921 // transformation as well. 2922 // 2923 // TODO: There are some cases where this transformation is not 2924 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2925 // this transformation should be narrowed down. 2926 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2927 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2928 SCEV::FlagAnyWrap, Depth + 1), 2929 getMulExpr(LHSC, Add->getOperand(1), 2930 SCEV::FlagAnyWrap, Depth + 1), 2931 SCEV::FlagAnyWrap, Depth + 1); 2932 2933 ++Idx; 2934 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2935 // We found two constants, fold them together! 2936 ConstantInt *Fold = 2937 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2938 Ops[0] = getConstant(Fold); 2939 Ops.erase(Ops.begin()+1); // Erase the folded element 2940 if (Ops.size() == 1) return Ops[0]; 2941 LHSC = cast<SCEVConstant>(Ops[0]); 2942 } 2943 2944 // If we are left with a constant one being multiplied, strip it off.
2945 if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) { 2946 Ops.erase(Ops.begin()); 2947 --Idx; 2948 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 2949 // If we have a multiply of zero, it will always be zero. 2950 return Ops[0]; 2951 } else if (Ops[0]->isAllOnesValue()) { 2952 // If we have a mul by -1 of an add, try distributing the -1 among the 2953 // add operands. 2954 if (Ops.size() == 2) { 2955 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2956 SmallVector<const SCEV *, 4> NewOps; 2957 bool AnyFolded = false; 2958 for (const SCEV *AddOp : Add->operands()) { 2959 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2960 Depth + 1); 2961 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2962 NewOps.push_back(Mul); 2963 } 2964 if (AnyFolded) 2965 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2966 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2967 // Negation preserves a recurrence's no self-wrap property. 2968 SmallVector<const SCEV *, 4> Operands; 2969 for (const SCEV *AddRecOp : AddRec->operands()) 2970 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2971 Depth + 1)); 2972 2973 return getAddRecExpr(Operands, AddRec->getLoop(), 2974 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2975 } 2976 } 2977 } 2978 2979 if (Ops.size() == 1) 2980 return Ops[0]; 2981 } 2982 2983 // Skip over the add expression until we get to a multiply. 2984 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2985 ++Idx; 2986 2987 // If there are mul operands, inline them all into this expression. 2988 if (Idx < Ops.size()) { 2989 bool DeletedMul = false; 2990 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2991 if (Ops.size() > MulOpsInlineThreshold) 2992 break; 2993 // If we have a mul, expand the mul operands onto the end of the 2994 // operands list. 2995 Ops.erase(Ops.begin()+Idx); 2996 Ops.append(Mul->op_begin(), Mul->op_end()); 2997 DeletedMul = true; 2998 } 2999 3000 // If we deleted at least one mul, we added operands to the end of the 3001 // list, and they are not necessarily sorted. Recurse to resort and 3002 // resimplify any operands we just acquired. 3003 if (DeletedMul) 3004 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3005 } 3006 3007 // If there are any add recurrences in the operands list, see if any other 3008 // added values are loop invariant. If so, we can fold them into the 3009 // recurrence. 3010 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 3011 ++Idx; 3012 3013 // Scan over all recurrences, trying to fold loop invariants into them. 3014 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 3015 // Scan all of the other operands to this mul and add them to the vector 3016 // if they are loop invariant w.r.t. the recurrence. 3017 SmallVector<const SCEV *, 8> LIOps; 3018 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3019 const Loop *AddRecLoop = AddRec->getLoop(); 3020 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3021 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3022 LIOps.push_back(Ops[i]); 3023 Ops.erase(Ops.begin()+i); 3024 --i; --e; 3025 } 3026 3027 // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No-self-wrap (NW) cannot be guaranteed after changing the step size,
      // but it will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
    //     ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
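      // (An illustrative affine instance of the formula above, assuming the
      //  coefficients do not overflow:
      //    {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D+B*C+B*D,+,2*B*D}<L>.)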
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || isHugeExpression(AddRec) ||
          isHugeExpression(OtherAddRec))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to the general rewrite:
  //   %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
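/// E.g. (illustrative): X udiv 1 folds to X, and a recurrence such as
/// {%a,+,8}<L> udiv 4 can become {%a udiv 4,+,2}<L> once the zext-based
/// comparison below shows that the fold is safe.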
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the
      // LHS expression.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence.
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
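      // E.g. (illustrative): (%x udiv 4) udiv 2 becomes %x udiv 8. If B*C
      // overflows the bit width, the combined divisor exceeds any possible
      // dividend, so the quotient is folded to 0 below.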
3247 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3248 if (auto *DivisorConstant = 3249 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3250 bool Overflow = false; 3251 APInt NewRHS = 3252 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3253 if (Overflow) { 3254 return getConstant(RHSC->getType(), 0, false); 3255 } 3256 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3257 } 3258 } 3259 3260 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3261 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3262 SmallVector<const SCEV *, 4> Operands; 3263 for (const SCEV *Op : A->operands()) 3264 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3265 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3266 Operands.clear(); 3267 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3268 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3269 if (isa<SCEVUDivExpr>(Op) || 3270 getMulExpr(Op, RHS) != A->getOperand(i)) 3271 break; 3272 Operands.push_back(Op); 3273 } 3274 if (Operands.size() == A->getNumOperands()) 3275 return getAddExpr(Operands); 3276 } 3277 } 3278 3279 // Fold if both operands are constant. 3280 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3281 Constant *LHSCV = LHSC->getValue(); 3282 Constant *RHSCV = RHSC->getValue(); 3283 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3284 RHSCV))); 3285 } 3286 } 3287 } 3288 3289 FoldingSetNodeID ID; 3290 ID.AddInteger(scUDivExpr); 3291 ID.AddPointer(LHS); 3292 ID.AddPointer(RHS); 3293 void *IP = nullptr; 3294 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3295 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3296 LHS, RHS); 3297 UniqueSCEVs.InsertNode(S, IP); 3298 addToLoopUseLists(S); 3299 return S; 3300 } 3301 3302 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3303 APInt A = C1->getAPInt().abs(); 3304 APInt B = C2->getAPInt().abs(); 3305 uint32_t ABW = A.getBitWidth(); 3306 uint32_t BBW = B.getBitWidth(); 3307 3308 if (ABW > BBW) 3309 B = B.zext(ABW); 3310 else if (ABW < BBW) 3311 A = A.zext(BBW); 3312 3313 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3314 } 3315 3316 /// Get a canonical unsigned division expression, or something simpler if 3317 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3318 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3319 /// it's not exact because the udiv may be clearing bits. 3320 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3321 const SCEV *RHS) { 3322 // TODO: we could try to find factors in all sorts of things, but for now we 3323 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3324 // end of this file for inspiration. 3325 3326 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3327 if (!Mul || !Mul->hasNoUnsignedWrap()) 3328 return getUDivExpr(LHS, RHS); 3329 3330 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3331 // If the mulexpr multiplies by a constant, then that constant must be the 3332 // first element of the mulexpr. 
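    // E.g. (illustrative): for (6 * %x)<nuw> udiv 2, the gcd logic below
    // divides both constants by gcd(6, 2) == 2, reducing the expression to
    // (3 * %x) udiv 1, which then folds to (3 * %x).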
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        // The gcd is greater than one: divide both constants by it.
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to call getMaxBackedgeTakenCount here and use that
  // information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
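  // E.g. (illustrative): with L_out the parent loop of L_in,
  //   {{%a,+,%b}<L_in>,+,%s}<L_out>  -->  {{%a,+,%s}<L_out>,+,%b}<L_in>,
  // provided every operand remains invariant in its respective loop.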
3420 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3421 const Loop *NestedLoop = NestedAR->getLoop(); 3422 if (L->contains(NestedLoop) 3423 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3424 : (!NestedLoop->contains(L) && 3425 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3426 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3427 NestedAR->op_end()); 3428 Operands[0] = NestedAR->getStart(); 3429 // AddRecs require their operands be loop-invariant with respect to their 3430 // loops. Don't perform this transformation if it would break this 3431 // requirement. 3432 bool AllInvariant = all_of( 3433 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3434 3435 if (AllInvariant) { 3436 // Create a recurrence for the outer loop with the same step size. 3437 // 3438 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3439 // inner recurrence has the same property. 3440 SCEV::NoWrapFlags OuterFlags = 3441 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3442 3443 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3444 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3445 return isLoopInvariant(Op, NestedLoop); 3446 }); 3447 3448 if (AllInvariant) { 3449 // Ok, both add recurrences are valid after the transformation. 3450 // 3451 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3452 // the outer recurrence has the same property. 3453 SCEV::NoWrapFlags InnerFlags = 3454 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3455 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3456 } 3457 } 3458 // Reset Operands to its original state. 3459 Operands[0] = NestedAR; 3460 } 3461 } 3462 3463 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3464 // already have one, otherwise create a new one. 3465 return getOrCreateAddRecExpr(Operands, L, Flags); 3466 } 3467 3468 const SCEV * 3469 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3470 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3471 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3472 // getSCEV(Base)->getType() has the same address space as Base->getType() 3473 // because SCEV::getType() preserves the address space. 3474 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3475 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3476 // instruction to its SCEV, because the Instruction may be guarded by control 3477 // flow and the no-overflow bits may not be valid for the expression in any 3478 // context. This can be fixed similarly to how these flags are handled for 3479 // adds. 3480 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3481 : SCEV::FlagAnyWrap; 3482 3483 const SCEV *TotalOffset = getZero(IntPtrTy); 3484 // The array size is unimportant. The first thing we do on CurTy is getting 3485 // its element type. 3486 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3487 for (const SCEV *IndexExpr : IndexExprs) { 3488 // Compute the (potentially symbolic) offset in bytes for this index. 3489 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3490 // For a struct, add the member offset. 3491 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3492 unsigned FieldNo = Index->getZExtValue(); 3493 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3494 3495 // Add the field offset to the running total offset. 
3496 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3497 3498 // Update CurTy to the type of the field at Index. 3499 CurTy = STy->getTypeAtIndex(Index); 3500 } else { 3501 // Update CurTy to its element type. 3502 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3503 // For an array, add the element offset, explicitly scaled. 3504 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3505 // Getelementptr indices are signed. 3506 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3507 3508 // Multiply the index by the element size to compute the element offset. 3509 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3510 3511 // Add the element offset to the running total offset. 3512 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3513 } 3514 } 3515 3516 // Add the total offset from all the GEP indices to the base. 3517 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3518 } 3519 3520 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 3521 const SCEV *RHS) { 3522 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3523 return getSMaxExpr(Ops); 3524 } 3525 3526 std::tuple<const SCEV *, FoldingSetNodeID, void *> 3527 ScalarEvolution::findExistingSCEVInCache(int SCEVType, 3528 ArrayRef<const SCEV *> Ops) { 3529 FoldingSetNodeID ID; 3530 void *IP = nullptr; 3531 ID.AddInteger(SCEVType); 3532 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3533 ID.AddPointer(Ops[i]); 3534 return std::tuple<const SCEV *, FoldingSetNodeID, void *>( 3535 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3536 } 3537 3538 const SCEV * 3539 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3540 assert(!Ops.empty() && "Cannot get empty smax!"); 3541 if (Ops.size() == 1) return Ops[0]; 3542 #ifndef NDEBUG 3543 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3544 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3545 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3546 "SCEVSMaxExpr operand types don't match!"); 3547 #endif 3548 3549 // Sort by complexity, this groups all similar expression types together. 3550 GroupByComplexity(Ops, &LI, DT); 3551 3552 // Check if we have created the same SMax expression before. 3553 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(scSMaxExpr, Ops))) { 3554 return S; 3555 } 3556 3557 // If there are any constants, fold them together. 3558 unsigned Idx = 0; 3559 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3560 ++Idx; 3561 assert(Idx < Ops.size()); 3562 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3563 // We found two constants, fold them together! 3564 ConstantInt *Fold = ConstantInt::get( 3565 getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); 3566 Ops[0] = getConstant(Fold); 3567 Ops.erase(Ops.begin()+1); // Erase the folded element 3568 if (Ops.size() == 1) return Ops[0]; 3569 LHSC = cast<SCEVConstant>(Ops[0]); 3570 } 3571 3572 // If we are left with a constant minimum-int, strip it off. 3573 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 3574 Ops.erase(Ops.begin()); 3575 --Idx; 3576 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 3577 // If we have an smax with a constant maximum-int, it will always be 3578 // maximum-int. 3579 return Ops[0]; 3580 } 3581 3582 if (Ops.size() == 1) return Ops[0]; 3583 } 3584 3585 // Find the first SMax 3586 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 3587 ++Idx; 3588 3589 // Check to see if one of the operands is an SMax. 
If so, expand its operands 3590 // onto our operand list, and recurse to simplify. 3591 if (Idx < Ops.size()) { 3592 bool DeletedSMax = false; 3593 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 3594 Ops.erase(Ops.begin()+Idx); 3595 Ops.append(SMax->op_begin(), SMax->op_end()); 3596 DeletedSMax = true; 3597 } 3598 3599 if (DeletedSMax) 3600 return getSMaxExpr(Ops); 3601 } 3602 3603 // Okay, check to see if the same value occurs in the operand list twice. If 3604 // so, delete one. Since we sorted the list, these values are required to 3605 // be adjacent. 3606 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3607 // X smax Y smax Y --> X smax Y 3608 // X smax Y --> X, if X is always greater than Y 3609 if (Ops[i] == Ops[i+1] || 3610 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 3611 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 3612 --i; --e; 3613 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 3614 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 3615 --i; --e; 3616 } 3617 3618 if (Ops.size() == 1) return Ops[0]; 3619 3620 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3621 3622 // Okay, it looks like we really DO need an smax expr. Check to see if we 3623 // already have one, otherwise create a new one. 3624 const SCEV *ExistingSCEV; 3625 FoldingSetNodeID ID; 3626 void *IP; 3627 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(scSMaxExpr, Ops); 3628 if (ExistingSCEV) 3629 return ExistingSCEV; 3630 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3631 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3632 SCEV *S = 3633 new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 3634 UniqueSCEVs.InsertNode(S, IP); 3635 addToLoopUseLists(S); 3636 return S; 3637 } 3638 3639 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 3640 const SCEV *RHS) { 3641 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3642 return getUMaxExpr(Ops); 3643 } 3644 3645 const SCEV * 3646 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3647 assert(!Ops.empty() && "Cannot get empty umax!"); 3648 if (Ops.size() == 1) return Ops[0]; 3649 #ifndef NDEBUG 3650 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3651 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3652 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3653 "SCEVUMaxExpr operand types don't match!"); 3654 #endif 3655 3656 // Sort by complexity, this groups all similar expression types together. 3657 GroupByComplexity(Ops, &LI, DT); 3658 3659 // Check if we have created the same UMax expression before. 3660 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(scUMaxExpr, Ops))) { 3661 return S; 3662 } 3663 3664 // If there are any constants, fold them together. 3665 unsigned Idx = 0; 3666 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3667 ++Idx; 3668 assert(Idx < Ops.size()); 3669 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3670 // We found two constants, fold them together! 3671 ConstantInt *Fold = ConstantInt::get( 3672 getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); 3673 Ops[0] = getConstant(Fold); 3674 Ops.erase(Ops.begin()+1); // Erase the folded element 3675 if (Ops.size() == 1) return Ops[0]; 3676 LHSC = cast<SCEVConstant>(Ops[0]); 3677 } 3678 3679 // If we are left with a constant minimum-int, strip it off. 
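    // (Illustrative: for umax the minimum-int is 0, so umax(0, %x) reduces
    // to %x below, while a umax with an all-ones constant has already been
    // folded to all-ones above.)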
3680 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 3681 Ops.erase(Ops.begin()); 3682 --Idx; 3683 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 3684 // If we have an umax with a constant maximum-int, it will always be 3685 // maximum-int. 3686 return Ops[0]; 3687 } 3688 3689 if (Ops.size() == 1) return Ops[0]; 3690 } 3691 3692 // Find the first UMax 3693 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 3694 ++Idx; 3695 3696 // Check to see if one of the operands is a UMax. If so, expand its operands 3697 // onto our operand list, and recurse to simplify. 3698 if (Idx < Ops.size()) { 3699 bool DeletedUMax = false; 3700 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 3701 Ops.erase(Ops.begin()+Idx); 3702 Ops.append(UMax->op_begin(), UMax->op_end()); 3703 DeletedUMax = true; 3704 } 3705 3706 if (DeletedUMax) 3707 return getUMaxExpr(Ops); 3708 } 3709 3710 // Okay, check to see if the same value occurs in the operand list twice. If 3711 // so, delete one. Since we sorted the list, these values are required to 3712 // be adjacent. 3713 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 3714 // X umax Y umax Y --> X umax Y 3715 // X umax Y --> X, if X is always greater than Y 3716 if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning( 3717 ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) { 3718 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3719 --i; --e; 3720 } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i], 3721 Ops[i + 1])) { 3722 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3723 --i; --e; 3724 } 3725 3726 if (Ops.size() == 1) return Ops[0]; 3727 3728 assert(!Ops.empty() && "Reduced umax down to nothing!"); 3729 3730 // Okay, it looks like we really DO need a umax expr. Check to see if we 3731 // already have one, otherwise create a new one. 3732 const SCEV *ExistingSCEV; 3733 FoldingSetNodeID ID; 3734 void *IP; 3735 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(scUMaxExpr, Ops); 3736 if (ExistingSCEV) 3737 return ExistingSCEV; 3738 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3739 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3740 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 3741 O, Ops.size()); 3742 UniqueSCEVs.InsertNode(S, IP); 3743 addToLoopUseLists(S); 3744 return S; 3745 } 3746 3747 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3748 const SCEV *RHS) { 3749 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3750 return getSMinExpr(Ops); 3751 } 3752 3753 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3754 // ~smax(~x, ~y, ~z) == smin(x, y, z). 3755 SmallVector<const SCEV *, 2> NotOps; 3756 for (auto *S : Ops) 3757 NotOps.push_back(getNotSCEV(S)); 3758 return getNotSCEV(getSMaxExpr(NotOps)); 3759 } 3760 3761 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3762 const SCEV *RHS) { 3763 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3764 return getUMinExpr(Ops); 3765 } 3766 3767 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3768 assert(!Ops.empty() && "At least one operand must be!"); 3769 // Trivial case. 3770 if (Ops.size() == 1) 3771 return Ops[0]; 3772 3773 // ~umax(~x, ~y, ~z) == umin(x, y, z). 
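  // (Illustrative check in i8: umin(3, 5) == 3, and
  //  ~umax(~3, ~5) == ~umax(252, 250) == ~252 == 3.)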
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getUMaxExpr(NotOps));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ?
T1 : T2; 3857 } 3858 3859 const SCEV *ScalarEvolution::getCouldNotCompute() { 3860 return CouldNotCompute.get(); 3861 } 3862 3863 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3864 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3865 auto *SU = dyn_cast<SCEVUnknown>(S); 3866 return SU && SU->getValue() == nullptr; 3867 }); 3868 3869 return !ContainsNulls; 3870 } 3871 3872 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3873 HasRecMapType::iterator I = HasRecMap.find(S); 3874 if (I != HasRecMap.end()) 3875 return I->second; 3876 3877 bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>); 3878 HasRecMap.insert({S, FoundAddRec}); 3879 return FoundAddRec; 3880 } 3881 3882 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3883 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3884 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3885 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3886 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3887 if (!Add) 3888 return {S, nullptr}; 3889 3890 if (Add->getNumOperands() != 2) 3891 return {S, nullptr}; 3892 3893 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3894 if (!ConstOp) 3895 return {S, nullptr}; 3896 3897 return {Add->getOperand(1), ConstOp->getValue()}; 3898 } 3899 3900 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3901 /// by the value and offset from any ValueOffsetPair in the set. 3902 SetVector<ScalarEvolution::ValueOffsetPair> * 3903 ScalarEvolution::getSCEVValues(const SCEV *S) { 3904 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3905 if (SI == ExprValueMap.end()) 3906 return nullptr; 3907 #ifndef NDEBUG 3908 if (VerifySCEVMap) { 3909 // Check there is no dangling Value in the set returned. 3910 for (const auto &VE : SI->second) 3911 assert(ValueExprMap.count(VE.first)); 3912 } 3913 #endif 3914 return &SI->second; 3915 } 3916 3917 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3918 /// cannot be used separately. eraseValueFromMap should be used to remove 3919 /// V from ValueExprMap and ExprValueMap at the same time. 3920 void ScalarEvolution::eraseValueFromMap(Value *V) { 3921 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3922 if (I != ValueExprMap.end()) { 3923 const SCEV *S = I->second; 3924 // Remove {V, 0} from the set of ExprValueMap[S] 3925 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3926 SV->remove({V, nullptr}); 3927 3928 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3929 const SCEV *Stripped; 3930 ConstantInt *Offset; 3931 std::tie(Stripped, Offset) = splitAddExpr(S); 3932 if (Offset != nullptr) { 3933 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3934 SV->remove({V, Offset}); 3935 } 3936 ValueExprMap.erase(V); 3937 } 3938 } 3939 3940 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3941 /// TODO: In reality it is better to check the poison recursively 3942 /// but this is better than nothing. 
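/// E.g. (illustrative): if %a = add nsw i32 %x, %y maps to the SCEV
/// (%x + %y) without <nsw>, then %a carries poison information that the
/// SCEV lost; getSCEV below therefore avoids recording %a as an expansion
/// of that SCEV.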
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S is inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*RHS when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*RHS may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return
getSignExtendExpr(V, Ty); 4117 } 4118 4119 const SCEV * 4120 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 4121 Type *SrcTy = V->getType(); 4122 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4123 "Cannot noop or any extend with non-integer arguments!"); 4124 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4125 "getNoopOrAnyExtend cannot truncate!"); 4126 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4127 return V; // No conversion 4128 return getAnyExtendExpr(V, Ty); 4129 } 4130 4131 const SCEV * 4132 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 4133 Type *SrcTy = V->getType(); 4134 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4135 "Cannot truncate or noop with non-integer arguments!"); 4136 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4137 "getTruncateOrNoop cannot extend!"); 4138 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4139 return V; // No conversion 4140 return getTruncateExpr(V, Ty); 4141 } 4142 4143 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4144 const SCEV *RHS) { 4145 const SCEV *PromotedLHS = LHS; 4146 const SCEV *PromotedRHS = RHS; 4147 4148 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4149 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4150 else 4151 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4152 4153 return getUMaxExpr(PromotedLHS, PromotedRHS); 4154 } 4155 4156 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4157 const SCEV *RHS) { 4158 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4159 return getUMinFromMismatchedTypes(Ops); 4160 } 4161 4162 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4163 SmallVectorImpl<const SCEV *> &Ops) { 4164 assert(!Ops.empty() && "At least one operand must be!"); 4165 // Trivial case. 4166 if (Ops.size() == 1) 4167 return Ops[0]; 4168 4169 // Find the max type first. 4170 Type *MaxType = nullptr; 4171 for (auto *S : Ops) 4172 if (MaxType) 4173 MaxType = getWiderType(MaxType, S->getType()); 4174 else 4175 MaxType = S->getType(); 4176 4177 // Extend all ops to max type. 4178 SmallVector<const SCEV *, 2> PromotedOps; 4179 for (auto *S : Ops) 4180 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4181 4182 // Generate umin. 4183 return getUMinExpr(PromotedOps); 4184 } 4185 4186 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4187 // A pointer operand may evaluate to a nonpointer expression, such as null. 4188 if (!V->getType()->isPointerTy()) 4189 return V; 4190 4191 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 4192 return getPointerBase(Cast->getOperand()); 4193 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4194 const SCEV *PtrOp = nullptr; 4195 for (const SCEV *NAryOp : NAry->operands()) { 4196 if (NAryOp->getType()->isPointerTy()) { 4197 // Cannot find the base of an expression with multiple pointer operands. 4198 if (PtrOp) 4199 return V; 4200 PtrOp = NAryOp; 4201 } 4202 } 4203 if (!PtrOp) 4204 return V; 4205 return getPointerBase(PtrOp); 4206 } 4207 return V; 4208 } 4209 4210 /// Push users of the given Instruction onto the given Worklist. 4211 static void 4212 PushDefUseChildren(Instruction *I, 4213 SmallVectorImpl<Instruction *> &Worklist) { 4214 // Push the def-use children onto the Worklist stack. 
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression when its loop is L. If the loop is not L, use the AddRec itself
/// if IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done either.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
/// post-increment expression when its loop is L; otherwise use the AddRec
/// itself.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
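/// E.g. (illustrative): for S = {%start,+,%step}<L>, the rewrite yields the
/// post-increment form {%start + %step,+,%step}<L>.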
4309 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4310 public: 4311 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4312 SCEVPostIncRewriter Rewriter(L, SE); 4313 const SCEV *Result = Rewriter.visit(S); 4314 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4315 ? SE.getCouldNotCompute() 4316 : Result; 4317 } 4318 4319 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4320 if (!SE.isLoopInvariant(Expr, L)) 4321 SeenLoopVariantSCEVUnknown = true; 4322 return Expr; 4323 } 4324 4325 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4326 // Only re-write AddRecExprs for this loop. 4327 if (Expr->getLoop() == L) 4328 return Expr->getPostIncExpr(SE); 4329 SeenOtherLoops = true; 4330 return Expr; 4331 } 4332 4333 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4334 4335 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4336 4337 private: 4338 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4339 : SCEVRewriteVisitor(SE), L(L) {} 4340 4341 const Loop *L; 4342 bool SeenLoopVariantSCEVUnknown = false; 4343 bool SeenOtherLoops = false; 4344 }; 4345 4346 /// This class evaluates the compare condition by matching it against the 4347 /// condition of loop latch. If there is a match we assume a true value 4348 /// for the condition while building SCEV nodes. 4349 class SCEVBackedgeConditionFolder 4350 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4351 public: 4352 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4353 ScalarEvolution &SE) { 4354 bool IsPosBECond = false; 4355 Value *BECond = nullptr; 4356 if (BasicBlock *Latch = L->getLoopLatch()) { 4357 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4358 if (BI && BI->isConditional()) { 4359 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4360 "Both outgoing branches should not target same header!"); 4361 BECond = BI->getCondition(); 4362 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4363 } else { 4364 return S; 4365 } 4366 } 4367 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4368 return Rewriter.visit(S); 4369 } 4370 4371 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4372 const SCEV *Result = Expr; 4373 bool InvariantF = SE.isLoopInvariant(Expr, L); 4374 4375 if (!InvariantF) { 4376 Instruction *I = cast<Instruction>(Expr->getValue()); 4377 switch (I->getOpcode()) { 4378 case Instruction::Select: { 4379 SelectInst *SI = cast<SelectInst>(I); 4380 Optional<const SCEV *> Res = 4381 compareWithBackedgeCondition(SI->getCondition()); 4382 if (Res.hasValue()) { 4383 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4384 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4385 } 4386 break; 4387 } 4388 default: { 4389 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4390 if (Res.hasValue()) 4391 Result = Res.getValue(); 4392 break; 4393 } 4394 } 4395 } 4396 return Result; 4397 } 4398 4399 private: 4400 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4401 bool IsPosBECond, ScalarEvolution &SE) 4402 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4403 IsPositiveBECond(IsPosBECond) {} 4404 4405 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4406 4407 const Loop *L; 4408 /// Loop back condition. 4409 Value *BackedgeCond = nullptr; 4410 /// Set to true if loop back is on positive branch condition. 
4411 bool IsPositiveBECond; 4412 }; 4413 4414 Optional<const SCEV *> 4415 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4416 4417 // If value matches the backedge condition for loop latch, 4418 // then return a constant evolution node based on loopback 4419 // branch taken. 4420 if (BackedgeCond == IC) 4421 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4422 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4423 return None; 4424 } 4425 4426 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4427 public: 4428 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4429 ScalarEvolution &SE) { 4430 SCEVShiftRewriter Rewriter(L, SE); 4431 const SCEV *Result = Rewriter.visit(S); 4432 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4433 } 4434 4435 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4436 // Only allow AddRecExprs for this loop. 4437 if (!SE.isLoopInvariant(Expr, L)) 4438 Valid = false; 4439 return Expr; 4440 } 4441 4442 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4443 if (Expr->getLoop() == L && Expr->isAffine()) 4444 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4445 Valid = false; 4446 return Expr; 4447 } 4448 4449 bool isValid() { return Valid; } 4450 4451 private: 4452 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4453 : SCEVRewriteVisitor(SE), L(L) {} 4454 4455 const Loop *L; 4456 bool Valid = true; 4457 }; 4458 4459 } // end anonymous namespace 4460 4461 SCEV::NoWrapFlags 4462 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4463 if (!AR->isAffine()) 4464 return SCEV::FlagAnyWrap; 4465 4466 using OBO = OverflowingBinaryOperator; 4467 4468 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4469 4470 if (!AR->hasNoSignedWrap()) { 4471 ConstantRange AddRecRange = getSignedRange(AR); 4472 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4473 4474 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4475 Instruction::Add, IncRange, OBO::NoSignedWrap); 4476 if (NSWRegion.contains(AddRecRange)) 4477 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4478 } 4479 4480 if (!AR->hasNoUnsignedWrap()) { 4481 ConstantRange AddRecRange = getUnsignedRange(AR); 4482 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4483 4484 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4485 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4486 if (NUWRegion.contains(AddRecRange)) 4487 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4488 } 4489 4490 return Result; 4491 } 4492 4493 namespace { 4494 4495 /// Represents an abstract binary operation. This may exist as a 4496 /// normal instruction or constant expression, or may have been 4497 /// derived from an expression tree. 4498 struct BinaryOp { 4499 unsigned Opcode; 4500 Value *LHS; 4501 Value *RHS; 4502 bool IsNSW = false; 4503 bool IsNUW = false; 4504 4505 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4506 /// constant expression. 
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // WO are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts.
We have a phi 4603 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4604 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4605 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4606 /// follows one of the following patterns: 4607 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4608 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4609 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4610 /// we return the type of the truncation operation, and indicate whether the 4611 /// truncated type should be treated as signed/unsigned by setting 4612 /// \p Signed to true/false, respectively. 4613 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4614 bool &Signed, ScalarEvolution &SE) { 4615 // The case where Op == SymbolicPHI (that is, with no type conversions on 4616 // the way) is handled by the regular add recurrence creating logic and 4617 // would have already been triggered in createAddRecFromPHI. Reaching it here 4618 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4619 // because one of the other operands of the SCEVAddExpr updating this PHI is 4620 // not invariant). 4621 // 4622 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4623 // this case predicates that allow us to prove that Op == SymbolicPHI will 4624 // be added. 4625 if (Op == SymbolicPHI) 4626 return nullptr; 4627 4628 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4629 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4630 if (SourceBits != NewBits) 4631 return nullptr; 4632 4633 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4634 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4635 if (!SExt && !ZExt) 4636 return nullptr; 4637 const SCEVTruncateExpr *Trunc = 4638 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4639 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4640 if (!Trunc) 4641 return nullptr; 4642 const SCEV *X = Trunc->getOperand(); 4643 if (X != SymbolicPHI) 4644 return nullptr; 4645 Signed = SExt != nullptr; 4646 return Trunc->getType(); 4647 } 4648 4649 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4650 if (!PN->getType()->isIntegerTy()) 4651 return nullptr; 4652 const Loop *L = LI.getLoopFor(PN->getParent()); 4653 if (!L || L->getHeader() != PN->getParent()) 4654 return nullptr; 4655 return L; 4656 } 4657 4658 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4659 // computation that updates the phi follows the following pattern: 4660 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4661 // which corresponds to a phi->trunc->sext/zext->add->phi update chain. 4662 // If so, try to see if it can be rewritten as an AddRecExpr under some 4663 // Predicates. If successful, return the rewrite and the predicates as a pair. 4664 // Also cache the results of the analysis. 4665 // 4666 // Example usage scenario: 4667 // Say the Rewriter is called for the following SCEV: 4668 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4669 // where: 4670 // %X = phi i64 (%Start, %BEValue) 4671 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4672 // and call this function with %SymbolicPHI = %X.
4673 // 4674 // The analysis will find that the value coming around the backedge has 4675 // the following SCEV: 4676 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4677 // Upon concluding that this matches the desired pattern, the function 4678 // will return the pair {NewAddRec, SmallPredsVec} where: 4679 // NewAddRec = {%Start,+,%Step} 4680 // SmallPredsVec = {P1, P2, P3} as follows: 4681 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4682 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4683 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4684 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4685 // under the predicates {P1,P2,P3}. 4686 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4687 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}} 4688 // 4689 // TODOs: 4690 // 4691 // 1) Extend the Induction descriptor to also support inductions that involve 4692 // casts: When needed (namely, when we are called in the context of the 4693 // vectorizer induction analysis), a Set of cast instructions will be 4694 // populated by this method, and provided back to isInductionPHI. This is 4695 // needed to allow the vectorizer to properly record them to be ignored by 4696 // the cost model and to avoid vectorizing them (otherwise these casts, 4697 // which are redundant under the runtime overflow checks, will be 4698 // vectorized, which can be costly). 4699 // 4700 // 2) Support additional induction/PHISCEV patterns: We also want to support 4701 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4702 // after the induction update operation (the induction increment): 4703 // 4704 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4705 // which corresponds to a phi->add->sext/zext->trunc->phi update chain. 4706 // 4707 // (Trunc iy ((SExt/ZExt ix (%SymbolicPHI) to iy) + InvariantAccum) to ix) 4708 // which corresponds to a phi->sext/zext->add->trunc->phi update chain. 4709 // 4710 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4711 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4712 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4713 SmallVector<const SCEVPredicate *, 3> Predicates; 4714 4715 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4716 // return an AddRec expression under some predicate. 4717 4718 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4719 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4720 assert(L && "Expecting an integer loop header phi"); 4721 4722 // The loop may have multiple entrances or multiple exits; we can analyze 4723 // this phi as an addrec if it has a unique entry value and a unique 4724 // backedge value.
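// For illustration only (a hypothetical IR sketch, not taken from any
// particular test case), a phi that satisfies this requirement looks like:
//   header:
//     %iv = phi i64 [ %start, %preheader ], [ %iv.next, %latch ]
// where %start is the unique entry value and %iv.next the unique backedge
// value. If two in-loop predecessors feed the phi different values, the
// scan below resets BEValueV and the analysis bails out.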
4725 Value *BEValueV = nullptr, *StartValueV = nullptr; 4726 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4727 Value *V = PN->getIncomingValue(i); 4728 if (L->contains(PN->getIncomingBlock(i))) { 4729 if (!BEValueV) { 4730 BEValueV = V; 4731 } else if (BEValueV != V) { 4732 BEValueV = nullptr; 4733 break; 4734 } 4735 } else if (!StartValueV) { 4736 StartValueV = V; 4737 } else if (StartValueV != V) { 4738 StartValueV = nullptr; 4739 break; 4740 } 4741 } 4742 if (!BEValueV || !StartValueV) 4743 return None; 4744 4745 const SCEV *BEValue = getSCEV(BEValueV); 4746 4747 // If the value coming around the backedge is an add with the symbolic 4748 // value we just inserted, possibly with casts that we can ignore under 4749 // an appropriate runtime guard, then we found a simple induction variable! 4750 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4751 if (!Add) 4752 return None; 4753 4754 // If there is a single occurrence of the symbolic value, possibly 4755 // casted, replace it with a recurrence. 4756 unsigned FoundIndex = Add->getNumOperands(); 4757 Type *TruncTy = nullptr; 4758 bool Signed; 4759 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4760 if ((TruncTy = 4761 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4762 if (FoundIndex == e) { 4763 FoundIndex = i; 4764 break; 4765 } 4766 4767 if (FoundIndex == Add->getNumOperands()) 4768 return None; 4769 4770 // Create an add with everything but the specified operand. 4771 SmallVector<const SCEV *, 8> Ops; 4772 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4773 if (i != FoundIndex) 4774 Ops.push_back(Add->getOperand(i)); 4775 const SCEV *Accum = getAddExpr(Ops); 4776 4777 // The runtime checks will not be valid if the step amount is 4778 // varying inside the loop. 4779 if (!isLoopInvariant(Accum, L)) 4780 return None; 4781 4782 // *** Part2: Create the predicates 4783 4784 // Analysis was successful: we have a phi-with-cast pattern for which we 4785 // can return an AddRec expression under the following predicates: 4786 // 4787 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4788 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4789 // P2: An Equal predicate that guarantees that 4790 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4791 // P3: An Equal predicate that guarantees that 4792 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4793 // 4794 // As we next prove, the above predicates guarantee that: 4795 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4796 // 4797 // 4798 // More formally, we want to prove that: 4799 // Expr(i+1) = Start + (i+1) * Accum 4800 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4801 // 4802 // Given that: 4803 // 1) Expr(0) = Start 4804 // 2) Expr(1) = Start + Accum 4805 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4806 // 3) Induction hypothesis (step i): 4807 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4808 // 4809 // Proof: 4810 // Expr(i+1) = 4811 // = Start + (i+1)*Accum 4812 // = (Start + i*Accum) + Accum 4813 // = Expr(i) + Accum 4814 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4815 // :: from step i 4816 // 4817 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4818 // 4819 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4820 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4821 // + Accum :: from P3 4822 // 4823 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4824 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4825 // 4826 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4827 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4828 // 4829 // By induction, the same applies to all iterations 1<=i<n: 4830 // 4831 4832 // Create a truncated addrec for which we will add a no overflow check (P1). 4833 const SCEV *StartVal = getSCEV(StartValueV); 4834 const SCEV *PHISCEV = 4835 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4836 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4837 4838 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4839 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4840 // will be constant. 4841 // 4842 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4843 // add P1. 4844 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4845 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4846 Signed ? SCEVWrapPredicate::IncrementNSSW 4847 : SCEVWrapPredicate::IncrementNUSW; 4848 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4849 Predicates.push_back(AddRecPred); 4850 } 4851 4852 // Create the Equal Predicates P2,P3: 4853 4854 // It is possible that the predicates P2 and/or P3 are computable at 4855 // compile time due to StartVal and/or Accum being constants. 4856 // If either one is, then we can check that now and escape if either P2 4857 // or P3 is false. 4858 4859 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4860 // for each of StartVal and Accum 4861 auto getExtendedExpr = [&](const SCEV *Expr, 4862 bool CreateSignExtend) -> const SCEV * { 4863 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4864 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4865 const SCEV *ExtendedExpr = 4866 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType()) 4867 : getZeroExtendExpr(TruncatedExpr, Expr->getType()); 4868 return ExtendedExpr; 4869 }; 4870 4871 // Given: 4872 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy) 4873 // = getExtendedExpr(Expr) 4874 // Determine whether the predicate P: Expr == ExtendedExpr 4875 // is known to be false at compile time. 4876 auto PredIsKnownFalse = [&](const SCEV *Expr, 4877 const SCEV *ExtendedExpr) -> bool { 4878 return Expr != ExtendedExpr && 4879 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); 4880 }; 4881 4882 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); 4883 if (PredIsKnownFalse(StartVal, StartExtended)) { 4884 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); 4885 return None; 4886 } 4887 4888 // The Step is always Signed (because the overflow checks are either 4889 // NSSW or NUSW). 4890 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); 4891 if (PredIsKnownFalse(Accum, AccumExtended)) { 4892 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); 4893 return None; 4894 } 4895 4896 auto AppendPredicate = [&](const SCEV *Expr, 4897 const SCEV *ExtendedExpr) -> void { 4898 if (Expr != ExtendedExpr && 4899 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { 4900 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); 4901 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); 4902 Predicates.push_back(Pred); 4903 } 4904 }; 4905 4906 AppendPredicate(StartVal, StartExtended); 4907 AppendPredicate(Accum, AccumExtended); 4908 4909 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in 4910 // which the casts have been folded away. The caller can rewrite SymbolicPHI 4911 // into NewAR if it will also add the runtime overflow checks specified in 4912 // Predicates. 4913 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); 4914 4915 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = 4916 std::make_pair(NewAR, Predicates); 4917 // Remember the result of the analysis for this SCEV at this location. 4918 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 4919 return PredRewrite; 4920 } 4921 4922 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4923 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 4924 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4925 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4926 if (!L) 4927 return None; 4928 4929 // Check to see if we already analyzed this PHI.
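// (Expository note, summarizing the surrounding code: PredicatedSCEVRewrites
// caches both outcomes of this analysis. A failure is recorded by mapping
// {SymbolicPHI, L} back to SymbolicPHI itself with an empty predicate list;
// a success maps it to the rewritten AddRec plus its predicates. The lookup
// below distinguishes the two cases.)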
4930 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 4931 if (I != PredicatedSCEVRewrites.end()) { 4932 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 4933 I->second; 4934 // Analysis was done before and failed to create an AddRec: 4935 if (Rewrite.first == SymbolicPHI) 4936 return None; 4937 // Analysis was done before and succeeded in creating an AddRec under 4938 // a predicate: 4939 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 4940 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 4941 return Rewrite; 4942 } 4943 4944 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4945 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 4946 4947 // Record in the cache that the analysis failed. 4948 if (!Rewrite) { 4949 SmallVector<const SCEVPredicate *, 3> Predicates; 4950 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 4951 return None; 4952 } 4953 4954 return Rewrite; 4955 } 4956 4957 // FIXME: This utility is currently required because the Rewriter currently 4958 // does not rewrite this expression: 4959 // {0, +, (sext ix (trunc iy to ix) to iy)} 4960 // into {0, +, %step}, 4961 // even when the following Equal predicate exists: 4962 // "%step == (sext ix (trunc iy to ix) to iy)". 4963 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 4964 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 4965 if (AR1 == AR2) 4966 return true; 4967 4968 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 4969 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 4970 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 4971 return false; 4972 return true; 4973 }; 4974 4975 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 4976 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 4977 return false; 4978 return true; 4979 } 4980 4981 /// A helper function for createAddRecFromPHI to handle simple cases. 4982 /// 4983 /// This function tries to find an AddRec expression for the simplest (yet most 4984 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 4985 /// If it fails, createAddRecFromPHI will use a more general, but slow, 4986 /// technique for finding the AddRec expression. 4987 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 4988 Value *BEValueV, 4989 Value *StartValueV) { 4990 const Loop *L = LI.getLoopFor(PN->getParent()); 4991 assert(L && L->getHeader() == PN->getParent()); 4992 assert(BEValueV && StartValueV); 4993 4994 auto BO = MatchBinaryOp(BEValueV, DT); 4995 if (!BO) 4996 return nullptr; 4997 4998 if (BO->Opcode != Instruction::Add) 4999 return nullptr; 5000 5001 const SCEV *Accum = nullptr; 5002 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 5003 Accum = getSCEV(BO->RHS); 5004 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 5005 Accum = getSCEV(BO->LHS); 5006 5007 if (!Accum) 5008 return nullptr; 5009 5010 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5011 if (BO->IsNUW) 5012 Flags = setFlags(Flags, SCEV::FlagNUW); 5013 if (BO->IsNSW) 5014 Flags = setFlags(Flags, SCEV::FlagNSW); 5015 5016 const SCEV *StartVal = getSCEV(StartValueV); 5017 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5018 5019 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5020 5021 // We can add Flags to the post-inc expression only if we 5022 // know that it is *undefined behavior* for BEValueV to 5023 // overflow.
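// For example (illustrative reasoning, not a specific test case): if the
// increment is "%iv.next = add nsw i32 %iv, 1" and %iv.next feeds the latch
// condition of a loop with no other exits or side effects,
// isAddRecNeverPoison can conclude that an overflowing %iv.next could not
// go unnoticed, so the post-increment recurrence {Start+Accum,+,Accum}
// built below may carry <nsw> as well.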
5024 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5025 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5026 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5027 5028 return PHISCEV; 5029 } 5030 5031 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 5032 const Loop *L = LI.getLoopFor(PN->getParent()); 5033 if (!L || L->getHeader() != PN->getParent()) 5034 return nullptr; 5035 5036 // The loop may have multiple entrances or multiple exits; we can analyze 5037 // this phi as an addrec if it has a unique entry value and a unique 5038 // backedge value. 5039 Value *BEValueV = nullptr, *StartValueV = nullptr; 5040 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5041 Value *V = PN->getIncomingValue(i); 5042 if (L->contains(PN->getIncomingBlock(i))) { 5043 if (!BEValueV) { 5044 BEValueV = V; 5045 } else if (BEValueV != V) { 5046 BEValueV = nullptr; 5047 break; 5048 } 5049 } else if (!StartValueV) { 5050 StartValueV = V; 5051 } else if (StartValueV != V) { 5052 StartValueV = nullptr; 5053 break; 5054 } 5055 } 5056 if (!BEValueV || !StartValueV) 5057 return nullptr; 5058 5059 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 5060 "PHI node already processed?"); 5061 5062 // First, try to find an AddRec expression without creating a fictitious 5063 // symbolic value for PN. 5064 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 5065 return S; 5066 5067 // Handle PHI node value symbolically. 5068 const SCEV *SymbolicName = getUnknown(PN); 5069 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); 5070 5071 // Using this symbolic name for the PHI, analyze the value coming around 5072 // the back-edge. 5073 const SCEV *BEValue = getSCEV(BEValueV); 5074 5075 // NOTE: If BEValue is loop invariant, we know that the PHI node just 5076 // has a special value for the first iteration of the loop. 5077 5078 // If the value coming around the backedge is an add with the symbolic 5079 // value we just inserted, then we found a simple induction variable! 5080 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 5081 // If there is a single occurrence of the symbolic value, replace it 5082 // with a recurrence. 5083 unsigned FoundIndex = Add->getNumOperands(); 5084 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5085 if (Add->getOperand(i) == SymbolicName) 5086 if (FoundIndex == e) { 5087 FoundIndex = i; 5088 break; 5089 } 5090 5091 if (FoundIndex != Add->getNumOperands()) { 5092 // Create an add with everything but the specified operand. 5093 SmallVector<const SCEV *, 8> Ops; 5094 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5095 if (i != FoundIndex) 5096 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 5097 L, *this)); 5098 const SCEV *Accum = getAddExpr(Ops); 5099 5100 // This is not a valid addrec if the step amount is varying each 5101 // loop iteration, but is not itself an addrec in this loop.
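// E.g. (hypothetical): for "i += j" where j is itself the addrec {1,+,1} in
// this loop, Accum is an addrec on L and i becomes the quadratic recurrence
// {Start,+,1,+,1}; but if j were reloaded from memory on every iteration,
// Accum would be neither invariant nor an addrec in L, and we must give up.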
5102 if (isLoopInvariant(Accum, L) || 5103 (isa<SCEVAddRecExpr>(Accum) && 5104 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 5105 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5106 5107 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 5108 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 5109 if (BO->IsNUW) 5110 Flags = setFlags(Flags, SCEV::FlagNUW); 5111 if (BO->IsNSW) 5112 Flags = setFlags(Flags, SCEV::FlagNSW); 5113 } 5114 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 5115 // If the increment is an inbounds GEP, then we know the address 5116 // space cannot be wrapped around. We cannot make any guarantee 5117 // about signed or unsigned overflow because pointers are 5118 // unsigned but we may have a negative index from the base 5119 // pointer. We can guarantee that no unsigned wrap occurs if the 5120 // indices form a positive value. 5121 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 5122 Flags = setFlags(Flags, SCEV::FlagNW); 5123 5124 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 5125 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 5126 Flags = setFlags(Flags, SCEV::FlagNUW); 5127 } 5128 5129 // We cannot transfer nuw and nsw flags from subtraction 5130 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 5131 // for instance. 5132 } 5133 5134 const SCEV *StartVal = getSCEV(StartValueV); 5135 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5136 5137 // Okay, for the entire analysis of this edge we assumed the PHI 5138 // to be symbolic. We now need to go back and purge all of the 5139 // entries for the scalars that use the symbolic expression. 5140 forgetSymbolicName(PN, SymbolicName); 5141 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 5142 5143 // We can add Flags to the post-inc expression only if we 5144 // know that it is *undefined behavior* for BEValueV to 5145 // overflow. 5146 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5147 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5148 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5149 5150 return PHISCEV; 5151 } 5152 } 5153 } else { 5154 // Otherwise, this could be a loop like this: 5155 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 5156 // In this case, j = {1,+,1} and BEValue is j. 5157 // Because the other in-value of i (0) fits the evolution of BEValue, 5158 // i really is an addrec evolution. 5159 // 5160 // We can generalize this, saying that i is the shifted value of BEValue 5161 // by one iteration: 5162 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 5163 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 5164 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); 5165 if (Shifted != getCouldNotCompute() && 5166 Start != getCouldNotCompute()) { 5167 const SCEV *StartVal = getSCEV(StartValueV); 5168 if (Start == StartVal) { 5169 // Okay, for the entire analysis of this edge we assumed the PHI 5170 // to be symbolic. We now need to go back and purge all of the 5171 // entries for the scalars that use the symbolic expression. 5172 forgetSymbolicName(PN, SymbolicName); 5173 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; 5174 return Shifted; 5175 } 5176 } 5177 } 5178 5179 // Remove the temporary PHI node SCEV that has been inserted while intending 5180 // to create an AddRecExpr for this PHI node. We cannot keep this temporary 5181 // as it will prevent later (possibly simpler) SCEV expressions from being 5182 // added to the ValueExprMap.
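// (E.g., if the temporary SCEVUnknown stayed cached, a later getSCEV(PN)
// would keep returning the opaque symbolic node even after the operands
// simplify enough for a real addrec to be formed. Illustrative rationale.)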
5183 eraseValueFromMap(PN); 5184 5185 return nullptr; 5186 } 5187 5188 // Checks if the SCEV S is available at BB. S is considered available at BB 5189 // if S can be materialized at BB without introducing a fault. 5190 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 5191 BasicBlock *BB) { 5192 struct CheckAvailable { 5193 bool TraversalDone = false; 5194 bool Available = true; 5195 5196 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 5197 BasicBlock *BB = nullptr; 5198 DominatorTree &DT; 5199 5200 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 5201 : L(L), BB(BB), DT(DT) {} 5202 5203 bool setUnavailable() { 5204 TraversalDone = true; 5205 Available = false; 5206 return false; 5207 } 5208 5209 bool follow(const SCEV *S) { 5210 switch (S->getSCEVType()) { 5211 case scConstant: case scTruncate: case scZeroExtend: case scSignExtend: 5212 case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr: 5213 // These expressions are available if their operand(s) is/are. 5214 return true; 5215 5216 case scAddRecExpr: { 5217 // We allow add recurrences that are for the loop BB is in, or some 5218 // outer loop. This guarantees availability because the value of the 5219 // add recurrence at BB is simply the "current" value of the induction 5220 // variable. We can relax this in the future; for instance an add 5221 // recurrence on a sibling dominating loop is also available at BB. 5222 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 5223 if (L && (ARLoop == L || ARLoop->contains(L))) 5224 return true; 5225 5226 return setUnavailable(); 5227 } 5228 5229 case scUnknown: { 5230 // For SCEVUnknown, we check for simple dominance. 5231 const auto *SU = cast<SCEVUnknown>(S); 5232 Value *V = SU->getValue(); 5233 5234 if (isa<Argument>(V)) 5235 return false; 5236 5237 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 5238 return false; 5239 5240 return setUnavailable(); 5241 } 5242 5243 case scUDivExpr: 5244 case scCouldNotCompute: 5245 // We do not try to be smart about these at all. 5246 return setUnavailable(); 5247 } 5248 llvm_unreachable("switch should be fully covered!"); 5249 } 5250 5251 bool isDone() { return TraversalDone; } 5252 }; 5253 5254 CheckAvailable CA(L, BB, DT); 5255 SCEVTraversal<CheckAvailable> ST(CA); 5256 5257 ST.visitAll(S); 5258 return CA.Available; 5259 } 5260 5261 // Try to match a control flow sequence that branches out at BI and merges back 5262 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5263 // match.
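// Both operand orders are accepted: the second dominance check below
// handles the "swapped" phi, e.g. (illustrative sketch)
//   merge: %phi = phi i32 [ %y, %right ], [ %x, %left ]
// in which case LHS and RHS are exchanged so that the result still reads
// "C ? LHS : RHS".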
5264 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5265 Value *&C, Value *&LHS, Value *&RHS) { 5266 C = BI->getCondition(); 5267 5268 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5269 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5270 5271 if (!LeftEdge.isSingleEdge()) 5272 return false; 5273 5274 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5275 5276 Use &LeftUse = Merge->getOperandUse(0); 5277 Use &RightUse = Merge->getOperandUse(1); 5278 5279 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5280 LHS = LeftUse; 5281 RHS = RightUse; 5282 return true; 5283 } 5284 5285 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5286 LHS = RightUse; 5287 RHS = LeftUse; 5288 return true; 5289 } 5290 5291 return false; 5292 } 5293 5294 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5295 auto IsReachable = 5296 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5297 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5298 const Loop *L = LI.getLoopFor(PN->getParent()); 5299 5300 // We don't want to break LCSSA, even in a SCEV expression tree. 5301 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5302 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5303 return nullptr; 5304 5305 // Try to match 5306 // 5307 // br %cond, label %left, label %right 5308 // left: 5309 // br label %merge 5310 // right: 5311 // br label %merge 5312 // merge: 5313 // V = phi [ %x, %left ], [ %y, %right ] 5314 // 5315 // as "select %cond, %x, %y" 5316 5317 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5318 assert(IDom && "At least the entry block should dominate PN"); 5319 5320 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5321 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5322 5323 if (BI && BI->isConditional() && 5324 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5325 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5326 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5327 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5328 } 5329 5330 return nullptr; 5331 } 5332 5333 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5334 if (const SCEV *S = createAddRecFromPHI(PN)) 5335 return S; 5336 5337 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5338 return S; 5339 5340 // If the PHI has a single incoming value, follow that value, unless the 5341 // PHI's incoming blocks are in a different loop, in which case doing so 5342 // risks breaking LCSSA form. Instcombine would normally zap these, but 5343 // it doesn't have DominatorTree information, so it may miss cases. 5344 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5345 if (LI.replacementPreservesLCSSAForm(PN, V)) 5346 return getSCEV(V); 5347 5348 // If it's not a loop phi, we can't handle it yet. 5349 return getUnknown(PN); 5350 } 5351 5352 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5353 Value *Cond, 5354 Value *TrueVal, 5355 Value *FalseVal) { 5356 // Handle "constant" branch or select. This can occur for instance when a 5357 // loop pass transforms an inner loop and moves on to process the outer loop. 5358 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5359 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5360 5361 // Try to match some simple smax or umax patterns. 
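// For example (illustrative idiom, mirroring the rewrites below):
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b        ; --> smax(%a, %b)
// and, with a common offset on both arms:
//   select (%a >s %b), (%a + %x), (%b + %x)  ; --> smax(%a, %b) + %x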
5362 auto *ICI = dyn_cast<ICmpInst>(Cond); 5363 if (!ICI) 5364 return getUnknown(I); 5365 5366 Value *LHS = ICI->getOperand(0); 5367 Value *RHS = ICI->getOperand(1); 5368 5369 switch (ICI->getPredicate()) { 5370 case ICmpInst::ICMP_SLT: 5371 case ICmpInst::ICMP_SLE: 5372 std::swap(LHS, RHS); 5373 LLVM_FALLTHROUGH; 5374 case ICmpInst::ICMP_SGT: 5375 case ICmpInst::ICMP_SGE: 5376 // a >s b ? a+x : b+x -> smax(a, b)+x 5377 // a >s b ? b+x : a+x -> smin(a, b)+x 5378 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5379 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5380 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5381 const SCEV *LA = getSCEV(TrueVal); 5382 const SCEV *RA = getSCEV(FalseVal); 5383 const SCEV *LDiff = getMinusSCEV(LA, LS); 5384 const SCEV *RDiff = getMinusSCEV(RA, RS); 5385 if (LDiff == RDiff) 5386 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5387 LDiff = getMinusSCEV(LA, RS); 5388 RDiff = getMinusSCEV(RA, LS); 5389 if (LDiff == RDiff) 5390 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5391 } 5392 break; 5393 case ICmpInst::ICMP_ULT: 5394 case ICmpInst::ICMP_ULE: 5395 std::swap(LHS, RHS); 5396 LLVM_FALLTHROUGH; 5397 case ICmpInst::ICMP_UGT: 5398 case ICmpInst::ICMP_UGE: 5399 // a >u b ? a+x : b+x -> umax(a, b)+x 5400 // a >u b ? b+x : a+x -> umin(a, b)+x 5401 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5402 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5403 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5404 const SCEV *LA = getSCEV(TrueVal); 5405 const SCEV *RA = getSCEV(FalseVal); 5406 const SCEV *LDiff = getMinusSCEV(LA, LS); 5407 const SCEV *RDiff = getMinusSCEV(RA, RS); 5408 if (LDiff == RDiff) 5409 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5410 LDiff = getMinusSCEV(LA, RS); 5411 RDiff = getMinusSCEV(RA, LS); 5412 if (LDiff == RDiff) 5413 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5414 } 5415 break; 5416 case ICmpInst::ICMP_NE: 5417 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5418 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5419 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5420 const SCEV *One = getOne(I->getType()); 5421 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5422 const SCEV *LA = getSCEV(TrueVal); 5423 const SCEV *RA = getSCEV(FalseVal); 5424 const SCEV *LDiff = getMinusSCEV(LA, LS); 5425 const SCEV *RDiff = getMinusSCEV(RA, One); 5426 if (LDiff == RDiff) 5427 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5428 } 5429 break; 5430 case ICmpInst::ICMP_EQ: 5431 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5432 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5433 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5434 const SCEV *One = getOne(I->getType()); 5435 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5436 const SCEV *LA = getSCEV(TrueVal); 5437 const SCEV *RA = getSCEV(FalseVal); 5438 const SCEV *LDiff = getMinusSCEV(LA, One); 5439 const SCEV *RDiff = getMinusSCEV(RA, LS); 5440 if (LDiff == RDiff) 5441 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5442 } 5443 break; 5444 default: 5445 break; 5446 } 5447 5448 return getUnknown(I); 5449 } 5450 5451 /// Expand GEP instructions into add and multiply operations. This allows them 5452 /// to be analyzed by regular SCEV code. 5453 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5454 // Don't attempt to analyze GEPs over unsized objects. 
5455 if (!GEP->getSourceElementType()->isSized()) 5456 return getUnknown(GEP); 5457 5458 SmallVector<const SCEV *, 4> IndexExprs; 5459 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5460 IndexExprs.push_back(getSCEV(*Index)); 5461 return getGEPExpr(GEP, IndexExprs); 5462 } 5463 5464 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5465 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5466 return C->getAPInt().countTrailingZeros(); 5467 5468 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5469 return std::min(GetMinTrailingZeros(T->getOperand()), 5470 (uint32_t)getTypeSizeInBits(T->getType())); 5471 5472 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5473 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5474 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5475 ? getTypeSizeInBits(E->getType()) 5476 : OpRes; 5477 } 5478 5479 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5480 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5481 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5482 ? getTypeSizeInBits(E->getType()) 5483 : OpRes; 5484 } 5485 5486 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5487 // The result is the min of all operands results. 5488 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5489 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5490 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5491 return MinOpRes; 5492 } 5493 5494 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5495 // The result is the sum of all operands results. 5496 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5497 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5498 for (unsigned i = 1, e = M->getNumOperands(); 5499 SumOpRes != BitWidth && i != e; ++i) 5500 SumOpRes = 5501 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5502 return SumOpRes; 5503 } 5504 5505 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5506 // The result is the min of all operands results. 5507 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5508 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5509 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5510 return MinOpRes; 5511 } 5512 5513 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5514 // The result is the min of all operands results. 5515 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5516 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5517 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5518 return MinOpRes; 5519 } 5520 5521 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5522 // The result is the min of all operands results. 5523 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5524 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5525 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5526 return MinOpRes; 5527 } 5528 5529 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5530 // For a SCEVUnknown, ask ValueTracking. 
5531 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5532 return Known.countMinTrailingZeros(); 5533 } 5534 5535 // SCEVUDivExpr 5536 return 0; 5537 } 5538 5539 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5540 auto I = MinTrailingZerosCache.find(S); 5541 if (I != MinTrailingZerosCache.end()) 5542 return I->second; 5543 5544 uint32_t Result = GetMinTrailingZerosImpl(S); 5545 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5546 assert(InsertPair.second && "Should insert a new key"); 5547 return InsertPair.first->second; 5548 } 5549 5550 /// Helper method to assign a range to V from metadata present in the IR. 5551 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5552 if (Instruction *I = dyn_cast<Instruction>(V)) 5553 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5554 return getConstantRangeFromMetadata(*MD); 5555 5556 return None; 5557 } 5558 5559 /// Determine the range for a particular SCEV. If SignHint is 5560 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5561 /// with a "cleaner" unsigned (resp. signed) representation. 5562 const ConstantRange & 5563 ScalarEvolution::getRangeRef(const SCEV *S, 5564 ScalarEvolution::RangeSignHint SignHint) { 5565 DenseMap<const SCEV *, ConstantRange> &Cache = 5566 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5567 : SignedRanges; 5568 5569 // See if we've computed this range already. 5570 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5571 if (I != Cache.end()) 5572 return I->second; 5573 5574 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5575 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5576 5577 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5578 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5579 5580 // If the value has known zeros, the maximum value will have those known zeros 5581 // as well. 
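// For example (worked numbers for illustration only): with BitWidth == 8 and
// TZ == 2, every value of S is a multiple of 4, so the unsigned conservative
// range computed below is [0, 0b11111100 + 1), i.e. the largest achievable
// value is 252.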
5582 uint32_t TZ = GetMinTrailingZeros(S); 5583 if (TZ != 0) { 5584 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5585 ConservativeResult = 5586 ConstantRange(APInt::getMinValue(BitWidth), 5587 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5588 else 5589 ConservativeResult = ConstantRange( 5590 APInt::getSignedMinValue(BitWidth), 5591 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5592 } 5593 5594 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5595 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5596 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5597 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5598 return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); 5599 } 5600 5601 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5602 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5603 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5604 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5605 return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); 5606 } 5607 5608 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5609 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5610 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5611 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5612 return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); 5613 } 5614 5615 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5616 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5617 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5618 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5619 return setRange(UMax, SignHint, ConservativeResult.intersectWith(X)); 5620 } 5621 5622 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5623 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5624 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5625 return setRange(UDiv, SignHint, 5626 ConservativeResult.intersectWith(X.udiv(Y))); 5627 } 5628 5629 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5630 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5631 return setRange(ZExt, SignHint, 5632 ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); 5633 } 5634 5635 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5636 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5637 return setRange(SExt, SignHint, 5638 ConservativeResult.intersectWith(X.signExtend(BitWidth))); 5639 } 5640 5641 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5642 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 5643 return setRange(Trunc, SignHint, 5644 ConservativeResult.intersectWith(X.truncate(BitWidth))); 5645 } 5646 5647 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5648 // If there's no unsigned wrap, the value will never be less than its 5649 // initial value. 5650 if (AddRec->hasNoUnsignedWrap()) 5651 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5652 if (!C->getValue()->isZero()) 5653 ConservativeResult = ConservativeResult.intersectWith( 5654 ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); 5655 5656 // If there's no signed wrap, and all the operands have the same sign or 5657 // zero, the value won't ever change sign. 
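// E.g. (illustrative instances): {1,+,2}<nsw> has all operands known
// non-negative, so its range is intersected below with [0, SignedMin) --
// the non-negative values; {-1,+,-2}<nsw> symmetrically stays within
// [SignedMin, 1), the non-positive values.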
5658 if (AddRec->hasNoSignedWrap()) { 5659 bool AllNonNeg = true; 5660 bool AllNonPos = true; 5661 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5662 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5663 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5664 } 5665 if (AllNonNeg) 5666 ConservativeResult = ConservativeResult.intersectWith( 5667 ConstantRange(APInt(BitWidth, 0), 5668 APInt::getSignedMinValue(BitWidth))); 5669 else if (AllNonPos) 5670 ConservativeResult = ConservativeResult.intersectWith( 5671 ConstantRange(APInt::getSignedMinValue(BitWidth), 5672 APInt(BitWidth, 1))); 5673 } 5674 5675 // TODO: non-affine addrec 5676 if (AddRec->isAffine()) { 5677 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 5678 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5679 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5680 auto RangeFromAffine = getRangeForAffineAR( 5681 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5682 BitWidth); 5683 if (!RangeFromAffine.isFullSet()) 5684 ConservativeResult = 5685 ConservativeResult.intersectWith(RangeFromAffine); 5686 5687 auto RangeFromFactoring = getRangeViaFactoring( 5688 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5689 BitWidth); 5690 if (!RangeFromFactoring.isFullSet()) 5691 ConservativeResult = 5692 ConservativeResult.intersectWith(RangeFromFactoring); 5693 } 5694 } 5695 5696 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5697 } 5698 5699 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5700 // Check if the IR explicitly contains !range metadata. 5701 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5702 if (MDRange.hasValue()) 5703 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue()); 5704 5705 // Split here to avoid paying the compile-time cost of calling both 5706 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5707 // if needed. 5708 const DataLayout &DL = getDataLayout(); 5709 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5710 // For a SCEVUnknown, ask ValueTracking. 5711 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5712 if (Known.One != ~Known.Zero + 1) 5713 ConservativeResult = 5714 ConservativeResult.intersectWith(ConstantRange(Known.One, 5715 ~Known.Zero + 1)); 5716 } else { 5717 assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && 5718 "generalize as needed!"); 5719 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5720 if (NS > 1) 5721 ConservativeResult = ConservativeResult.intersectWith( 5722 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 5723 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1)); 5724 } 5725 5726 // A range of Phi is a subset of union of all ranges of its input. 5727 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 5728 // Make sure that we do not run over cycled Phis. 5729 if (PendingPhiRanges.insert(Phi).second) { 5730 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 5731 for (auto &Op : Phi->operands()) { 5732 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 5733 RangeFromOps = RangeFromOps.unionWith(OpRange); 5734 // No point to continue if we already have a full set. 
5735 if (RangeFromOps.isFullSet()) 5736 break; 5737 } 5738 ConservativeResult = ConservativeResult.intersectWith(RangeFromOps); 5739 bool Erased = PendingPhiRanges.erase(Phi); 5740 assert(Erased && "Failed to erase Phi properly?"); 5741 (void) Erased; 5742 } 5743 } 5744 5745 return setRange(U, SignHint, std::move(ConservativeResult)); 5746 } 5747 5748 return setRange(S, SignHint, std::move(ConservativeResult)); 5749 } 5750 5751 // Given a StartRange, Step and MaxBECount for an expression, compute a range of 5752 // values that the expression can take. Initially, the expression has a value 5753 // from StartRange and then is changed by Step up to MaxBECount times. The Signed 5754 // argument defines if we treat Step as signed or unsigned. 5755 static ConstantRange getRangeForAffineARHelper(APInt Step, 5756 const ConstantRange &StartRange, 5757 const APInt &MaxBECount, 5758 unsigned BitWidth, bool Signed) { 5759 // If either Step or MaxBECount is 0, then the expression won't change, and we 5760 // just need to return the initial range. 5761 if (Step == 0 || MaxBECount == 0) 5762 return StartRange; 5763 5764 // If we don't know anything about the initial value (i.e. StartRange is 5765 // FullRange), then we don't know anything about the final range either. 5766 // Return FullRange. 5767 if (StartRange.isFullSet()) 5768 return ConstantRange::getFull(BitWidth); 5769 5770 // If Step is signed and negative, then we use its absolute value, but we also 5771 // note that we're moving in the opposite direction. 5772 bool Descending = Signed && Step.isNegative(); 5773 5774 if (Signed) 5775 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this: 5776 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128. 5777 // These equations hold true due to the well-defined wrap-around behavior of 5778 // APInt. 5779 Step = Step.abs(); 5780 5781 // Check if Offset is more than the full span of BitWidth. If it is, the 5782 // expression is guaranteed to overflow. 5783 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount)) 5784 return ConstantRange::getFull(BitWidth); 5785 5786 // Offset is by how much the expression can change. The checks above guarantee no 5787 // overflow here. 5788 APInt Offset = Step * MaxBECount; 5789 5790 // The minimum value of the final range will match the minimal value of StartRange 5791 // if the expression is increasing and will be decreased by Offset otherwise. 5792 // The maximum value of the final range will match the maximal value of StartRange 5793 // if the expression is decreasing and will be increased by Offset otherwise. 5794 APInt StartLower = StartRange.getLower(); 5795 APInt StartUpper = StartRange.getUpper() - 1; 5796 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5797 : (StartUpper + std::move(Offset)); 5798 5799 // It's possible that the new minimum/maximum value will fall into the initial 5800 // range (due to wrap around). This means that the expression can take any 5801 // value in this bitwidth, and we have to return full range. 5802 if (StartRange.contains(MovedBoundary)) 5803 return ConstantRange::getFull(BitWidth); 5804 5805 APInt NewLower = 5806 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5807 APInt NewUpper = 5808 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5809 NewUpper += 1; 5810 5811 // If we end up with full range, return a proper full range.
5812 if (NewLower == NewUpper) 5813 return ConstantRange::getFull(BitWidth); 5814 5815 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5816 return ConstantRange(std::move(NewLower), std::move(NewUpper)); 5817 } 5818 5819 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5820 const SCEV *Step, 5821 const SCEV *MaxBECount, 5822 unsigned BitWidth) { 5823 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5824 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5825 "Precondition!"); 5826 5827 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5828 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5829 5830 // First, consider step signed. 5831 ConstantRange StartSRange = getSignedRange(Start); 5832 ConstantRange StepSRange = getSignedRange(Step); 5833 5834 // If Step can be both positive and negative, we need to find ranges for the 5835 // maximum absolute step values in both directions and union them. 5836 ConstantRange SR = 5837 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5838 MaxBECountValue, BitWidth, /* Signed = */ true); 5839 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5840 StartSRange, MaxBECountValue, 5841 BitWidth, /* Signed = */ true)); 5842 5843 // Next, consider step unsigned. 5844 ConstantRange UR = getRangeForAffineARHelper( 5845 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5846 MaxBECountValue, BitWidth, /* Signed = */ false); 5847 5848 // Finally, intersect signed and unsigned ranges. 5849 return SR.intersectWith(UR); 5850 } 5851 5852 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5853 const SCEV *Step, 5854 const SCEV *MaxBECount, 5855 unsigned BitWidth) { 5856 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5857 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5858 5859 struct SelectPattern { 5860 Value *Condition = nullptr; 5861 APInt TrueValue; 5862 APInt FalseValue; 5863 5864 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5865 const SCEV *S) { 5866 Optional<unsigned> CastOp; 5867 APInt Offset(BitWidth, 0); 5868 5869 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5870 "Should be!"); 5871 5872 // Peel off a constant offset: 5873 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5874 // In the future we could consider being smarter here and handle 5875 // {Start+Step,+,Step} too. 
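// Worked example (hypothetical values): for S = (5 + (zext i8 %sel to i32))
// where %sel = select i1 %c, i8 1, i8 3, the code below records Offset = 5,
// peels the zext, matches the select via PatternMatch, and finally
// re-applies the cast and offset to obtain TrueValue = 6 and FalseValue = 8.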
5876 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5877 return; 5878 5879 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5880 S = SA->getOperand(1); 5881 } 5882 5883 // Peel off a cast operation 5884 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5885 CastOp = SCast->getSCEVType(); 5886 S = SCast->getOperand(); 5887 } 5888 5889 using namespace llvm::PatternMatch; 5890 5891 auto *SU = dyn_cast<SCEVUnknown>(S); 5892 const APInt *TrueVal, *FalseVal; 5893 if (!SU || 5894 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5895 m_APInt(FalseVal)))) { 5896 Condition = nullptr; 5897 return; 5898 } 5899 5900 TrueValue = *TrueVal; 5901 FalseValue = *FalseVal; 5902 5903 // Re-apply the cast we peeled off earlier 5904 if (CastOp.hasValue()) 5905 switch (*CastOp) { 5906 default: 5907 llvm_unreachable("Unknown SCEV cast type!"); 5908 5909 case scTruncate: 5910 TrueValue = TrueValue.trunc(BitWidth); 5911 FalseValue = FalseValue.trunc(BitWidth); 5912 break; 5913 case scZeroExtend: 5914 TrueValue = TrueValue.zext(BitWidth); 5915 FalseValue = FalseValue.zext(BitWidth); 5916 break; 5917 case scSignExtend: 5918 TrueValue = TrueValue.sext(BitWidth); 5919 FalseValue = FalseValue.sext(BitWidth); 5920 break; 5921 } 5922 5923 // Re-apply the constant offset we peeled off earlier 5924 TrueValue += Offset; 5925 FalseValue += Offset; 5926 } 5927 5928 bool isRecognized() { return Condition != nullptr; } 5929 }; 5930 5931 SelectPattern StartPattern(*this, BitWidth, Start); 5932 if (!StartPattern.isRecognized()) 5933 return ConstantRange::getFull(BitWidth); 5934 5935 SelectPattern StepPattern(*this, BitWidth, Step); 5936 if (!StepPattern.isRecognized()) 5937 return ConstantRange::getFull(BitWidth); 5938 5939 if (StartPattern.Condition != StepPattern.Condition) { 5940 // We don't handle this case today; but we could, by considering four 5941 // possibilities below instead of two. I'm not sure if there are cases where 5942 // that will help over what getRange already does, though. 5943 return ConstantRange::getFull(BitWidth); 5944 } 5945 5946 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5947 // construct arbitrary general SCEV expressions here. This function is called 5948 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5949 // say) can end up caching a suboptimal value. 5950 5951 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5952 // C2352 and C2512 (otherwise it isn't needed). 5953 5954 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5955 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5956 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5957 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5958 5959 ConstantRange TrueRange = 5960 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5961 ConstantRange FalseRange = 5962 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5963 5964 return TrueRange.unionWith(FalseRange); 5965 } 5966 5967 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5968 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5969 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5970 5971 // Return early if there are no flags to propagate to the SCEV. 
5972 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5973 if (BinOp->hasNoUnsignedWrap()) 5974 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 5975 if (BinOp->hasNoSignedWrap()) 5976 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 5977 if (Flags == SCEV::FlagAnyWrap) 5978 return SCEV::FlagAnyWrap; 5979 5980 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 5981 } 5982 5983 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 5984 // Here we check that I is in the header of the innermost loop containing I, 5985 // since we only deal with instructions in the loop header. The actual loop we 5986 // need to check later will come from an add recurrence, but getting that 5987 // requires computing the SCEV of the operands, which can be expensive. This 5988 // check we can do cheaply to rule out some cases early. 5989 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 5990 if (InnermostContainingLoop == nullptr || 5991 InnermostContainingLoop->getHeader() != I->getParent()) 5992 return false; 5993 5994 // Only proceed if we can prove that I does not yield poison. 5995 if (!programUndefinedIfFullPoison(I)) 5996 return false; 5997 5998 // At this point we know that if I is executed, then it does not wrap 5999 // according to at least one of NSW or NUW. If I is not executed, then we do 6000 // not know if the calculation that I represents would wrap. Multiple 6001 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6002 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6003 // derived from other instructions that map to the same SCEV. We cannot make 6004 // that guarantee for cases where I is not executed. So we need to find the 6005 // loop that I is considered in relation to and prove that I is executed for 6006 // every iteration of that loop. That implies that the value that I 6007 // calculates does not wrap anywhere in the loop, so then we can apply the 6008 // flags to the SCEV. 6009 // 6010 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6011 // from different loops, so that we know which loop to prove that I is 6012 // executed in. 6013 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6014 // I could be an extractvalue from a call to an overflow intrinsic. 6015 // TODO: We can do better here in some cases. 6016 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 6017 return false; 6018 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6019 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6020 bool AllOtherOpsLoopInvariant = true; 6021 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6022 ++OtherOpIndex) { 6023 if (OtherOpIndex != OpIndex) { 6024 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6025 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6026 AllOtherOpsLoopInvariant = false; 6027 break; 6028 } 6029 } 6030 } 6031 if (AllOtherOpsLoopInvariant && 6032 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6033 return true; 6034 } 6035 } 6036 return false; 6037 } 6038 6039 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6040 // If we know that \c I can never be poison period, then that's enough. 
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let K be the iteration in which \p I
  // first became poison):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
6125 } 6126 6127 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6128 assert(InsertPair.second && "We just checked!"); 6129 Itr = InsertPair.first; 6130 } 6131 6132 return Itr->second; 6133 } 6134 6135 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6136 if (!isSCEVable(V->getType())) 6137 return getUnknown(V); 6138 6139 if (Instruction *I = dyn_cast<Instruction>(V)) { 6140 // Don't attempt to analyze instructions in blocks that aren't 6141 // reachable. Such instructions don't matter, and they aren't required 6142 // to obey basic rules for definitions dominating uses which this 6143 // analysis depends on. 6144 if (!DT.isReachableFromEntry(I->getParent())) 6145 return getUnknown(UndefValue::get(V->getType())); 6146 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6147 return getConstant(CI); 6148 else if (isa<ConstantPointerNull>(V)) 6149 return getZero(V->getType()); 6150 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6151 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6152 else if (!isa<ConstantExpr>(V)) 6153 return getUnknown(V); 6154 6155 Operator *U = cast<Operator>(V); 6156 if (auto BO = MatchBinaryOp(U, DT)) { 6157 switch (BO->Opcode) { 6158 case Instruction::Add: { 6159 // The simple thing to do would be to just call getSCEV on both operands 6160 // and call getAddExpr with the result. However if we're looking at a 6161 // bunch of things all added together, this can be quite inefficient, 6162 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6163 // Instead, gather up all the operands and make a single getAddExpr call. 6164 // LLVM IR canonical form means we need only traverse the left operands. 6165 SmallVector<const SCEV *, 4> AddOps; 6166 do { 6167 if (BO->Op) { 6168 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6169 AddOps.push_back(OpSCEV); 6170 break; 6171 } 6172 6173 // If a NUW or NSW flag can be applied to the SCEV for this 6174 // addition, then compute the SCEV for this addition by itself 6175 // with a separate call to getAddExpr. We need to do that 6176 // instead of pushing the operands of the addition onto AddOps, 6177 // since the flags are only known to apply to this particular 6178 // addition - they may not apply to other additions that can be 6179 // formed with operands from AddOps. 
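        // For instance (illustrative): in ((a +nsw b) + c), the nsw holds
        // only for the inner addition; flattening everything into AddOps and
        // tagging the whole sum <nsw> would be unsound, so the inner add is
        // built below as its own flagged getAddExpr node.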
6180 const SCEV *RHS = getSCEV(BO->RHS); 6181 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6182 if (Flags != SCEV::FlagAnyWrap) { 6183 const SCEV *LHS = getSCEV(BO->LHS); 6184 if (BO->Opcode == Instruction::Sub) 6185 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6186 else 6187 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6188 break; 6189 } 6190 } 6191 6192 if (BO->Opcode == Instruction::Sub) 6193 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6194 else 6195 AddOps.push_back(getSCEV(BO->RHS)); 6196 6197 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6198 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6199 NewBO->Opcode != Instruction::Sub)) { 6200 AddOps.push_back(getSCEV(BO->LHS)); 6201 break; 6202 } 6203 BO = NewBO; 6204 } while (true); 6205 6206 return getAddExpr(AddOps); 6207 } 6208 6209 case Instruction::Mul: { 6210 SmallVector<const SCEV *, 4> MulOps; 6211 do { 6212 if (BO->Op) { 6213 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6214 MulOps.push_back(OpSCEV); 6215 break; 6216 } 6217 6218 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6219 if (Flags != SCEV::FlagAnyWrap) { 6220 MulOps.push_back( 6221 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6222 break; 6223 } 6224 } 6225 6226 MulOps.push_back(getSCEV(BO->RHS)); 6227 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6228 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6229 MulOps.push_back(getSCEV(BO->LHS)); 6230 break; 6231 } 6232 BO = NewBO; 6233 } while (true); 6234 6235 return getMulExpr(MulOps); 6236 } 6237 case Instruction::UDiv: 6238 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6239 case Instruction::URem: 6240 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6241 case Instruction::Sub: { 6242 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6243 if (BO->Op) 6244 Flags = getNoWrapFlagsFromUB(BO->Op); 6245 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6246 } 6247 case Instruction::And: 6248 // For an expression like x&255 that merely masks off the high bits, 6249 // use zext(trunc(x)) as the SCEV expression. 6250 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6251 if (CI->isZero()) 6252 return getSCEV(BO->RHS); 6253 if (CI->isMinusOne()) 6254 return getSCEV(BO->LHS); 6255 const APInt &A = CI->getValue(); 6256 6257 // Instcombine's ShrinkDemandedConstant may strip bits out of 6258 // constants, obscuring what would otherwise be a low-bits mask. 6259 // Use computeKnownBits to compute what ShrinkDemandedConstant 6260 // knew about to reconstruct a low-bits mask value. 6261 unsigned LZ = A.countLeadingZeros(); 6262 unsigned TZ = A.countTrailingZeros(); 6263 unsigned BitWidth = A.getBitWidth(); 6264 KnownBits Known(BitWidth); 6265 computeKnownBits(BO->LHS, Known, getDataLayout(), 6266 0, &AC, nullptr, &DT); 6267 6268 APInt EffectiveMask = 6269 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6270 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6271 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6272 const SCEV *LHS = getSCEV(BO->LHS); 6273 const SCEV *ShiftedLHS = nullptr; 6274 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6275 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6276 // For an expression like (x * 8) & 8, simplify the multiply. 
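            // Illustrative trace (assumed i32 values): for (x * 8) & 8 we
            // get TZ = 3 and MulZeros = 3, so GCD = 3 and DivAmt = 1; the
            // constant multiplier collapses to 1 and the overall result is
            // modeled as zext(trunc(x to i1)) * 8, i.e. (x & 1) ? 8 : 0.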
6277 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6278 unsigned GCD = std::min(MulZeros, TZ); 6279 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6280 SmallVector<const SCEV*, 4> MulOps; 6281 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6282 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6283 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6284 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6285 } 6286 } 6287 if (!ShiftedLHS) 6288 ShiftedLHS = getUDivExpr(LHS, MulCount); 6289 return getMulExpr( 6290 getZeroExtendExpr( 6291 getTruncateExpr(ShiftedLHS, 6292 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6293 BO->LHS->getType()), 6294 MulCount); 6295 } 6296 } 6297 break; 6298 6299 case Instruction::Or: 6300 // If the RHS of the Or is a constant, we may have something like: 6301 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6302 // optimizations will transparently handle this case. 6303 // 6304 // In order for this transformation to be safe, the LHS must be of the 6305 // form X*(2^n) and the Or constant must be less than 2^n. 6306 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6307 const SCEV *LHS = getSCEV(BO->LHS); 6308 const APInt &CIVal = CI->getValue(); 6309 if (GetMinTrailingZeros(LHS) >= 6310 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6311 // Build a plain add SCEV. 6312 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6313 // If the LHS of the add was an addrec and it has no-wrap flags, 6314 // transfer the no-wrap flags, since an or won't introduce a wrap. 6315 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6316 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6317 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6318 OldAR->getNoWrapFlags()); 6319 } 6320 return S; 6321 } 6322 } 6323 break; 6324 6325 case Instruction::Xor: 6326 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6327 // If the RHS of xor is -1, then this is a not operation. 6328 if (CI->isMinusOne()) 6329 return getNotSCEV(getSCEV(BO->LHS)); 6330 6331 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6332 // This is a variant of the check for xor with -1, and it handles 6333 // the case where instcombine has trimmed non-demanded bits out 6334 // of an xor with -1. 6335 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6336 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6337 if (LBO->getOpcode() == Instruction::And && 6338 LCI->getValue() == CI->getValue()) 6339 if (const SCEVZeroExtendExpr *Z = 6340 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6341 Type *UTy = BO->LHS->getType(); 6342 const SCEV *Z0 = Z->getOperand(); 6343 Type *Z0Ty = Z0->getType(); 6344 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6345 6346 // If C is a low-bits mask, the zero extend is serving to 6347 // mask off the high bits. Complement the operand and 6348 // re-apply the zext. 6349 if (CI->getValue().isMask(Z0TySize)) 6350 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6351 6352 // If C is a single bit, it may be in the sign-bit position 6353 // before the zero-extend. In this case, represent the xor 6354 // using an add, which is equivalent, and re-apply the zext. 
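              // Illustrative example: if C == 128 and getSCEV(BO->LHS) is
              // modeled as (zext i8 Z0 to i32), then C truncates to the i8
              // sign mask 0x80, and Z0 ^ 0x80 == Z0 + 0x80 (mod 2^8), so the
              // xor becomes zext(Z0 + 128).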
              APInt Trunc = CI->getValue().trunc(Z0TySize);
              if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignMask())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;

  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
      uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // It is currently not resolved how to interpret NSW for left
      // shift by BitWidth - 1, so we avoid applying flags in that
      // case. Remove this check (or this comment) once the situation
      // is resolved. See
      // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
      // and http://reviews.llvm.org/D8890 .
      auto Flags = SCEV::FlagAnyWrap;
      if (BO->Op && SA->getValue().ult(BitWidth - 1))
        Flags = getNoWrapFlagsFromUB(BO->Op);

      Constant *X = ConstantInt::get(
          getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
    }
    break;

  case Instruction::AShr: {
    // AShr X, C, where C is a constant.
    ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
    if (!CI)
      break;

    Type *OuterTy = BO->LHS->getType();
    uint64_t BitWidth = getTypeSizeInBits(OuterTy);
    // If the shift count is not less than the bitwidth, the result of
    // the shift is undefined. Don't try to analyze it, because the
    // resolution chosen here may differ from the resolution chosen in
    // other parts of the compiler.
    if (CI->getValue().uge(BitWidth))
      break;

    if (CI->isZero())
      return getSCEV(BO->LHS); // shift by zero --> noop

    uint64_t AShrAmt = CI->getZExtValue();
    Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

    Operator *L = dyn_cast<Operator>(BO->LHS);
    if (L && L->getOpcode() == Instruction::Shl) {
      // X = Shl A, n
      // Y = AShr X, m
      // Both n and m are constant.

      const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
      if (L->getOperand(1) == BO->RHS)
        // For a two-shift sext-inreg, i.e. n = m,
        // use sext(trunc(x)) as the SCEV expression.
        return getSignExtendExpr(
            getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

      ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
      if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
        uint64_t ShlAmt = ShlAmtCI->getZExtValue();
        if (ShlAmt > AShrAmt) {
          // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
          // expression. We already checked that ShlAmt < BitWidth, so
          // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
          // ShlAmt - AShrAmt < BitWidth - AShrAmt.
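          // Illustrative example (assumed i32 values): for X = shl A, 4 and
          // Y = ashr X, 2, TruncTy is i30 and we produce
          // sext(trunc(A to i30) * 4) back to i32.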
          APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                          ShlAmt - AShrAmt);
          return getSignExtendExpr(
              getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
              getConstant(Mul)), OuterTy);
        }
      }
    }
    break;
  }
  }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelectOrPHI only works for a condition that is an
    // `ICmpInst`, and constant expressions cannot have instructions as
    // operands, we'd have returned getUnknown for a select constant
    // expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
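  // E.g. (illustrative): a backedge-taken count of 9 gives a trip count of
  // 10; a count of 0xFFFFFFFF passes the 32-bit guard above but wraps to 0,
  // which callers treat as "unknown".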
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be the
/// multiple of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Get the expression for the number of loop iterations for which this loop is
/// guaranteed not to exit via ExitingBlock. Otherwise return
/// SCEVCouldNotCompute.
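/// For example (illustrative): in `for (i = 0; i != n; ++i)` containing
/// `if (i == 5) break;`, the exit count for the block testing i == 5 is 5,
/// assuming that exit is computable.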
const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock) {
  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getExact(L, this);
}

/// Similar to getBackedgeTakenCount, except it returns the least SCEV value
/// that is known never to be less than the actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getMax(this);
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a release build the statistic counters below are compiled out, so
  // reference them here to avoid unused-variable warnings.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //   loop0:
      //     %pn0 = phi
      //     ...
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v. If we don't have the early stop below then in cases
      // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
      // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the
      // trip count for loop1, effectively nullifying SCEV's trip count cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}

void ScalarEvolution::forgetAllLoops() {
  // This method is intended to forget all info about loops. It should
  // invalidate caches as if the following happened:
  //   - The trip counts of all loops have changed arbitrarily
  //   - Every llvm::Value has been updated in place to produce a different
  //     result.
6767 BackedgeTakenCounts.clear(); 6768 PredicatedBackedgeTakenCounts.clear(); 6769 LoopPropertiesCache.clear(); 6770 ConstantEvolutionLoopExitValue.clear(); 6771 ValueExprMap.clear(); 6772 ValuesAtScopes.clear(); 6773 LoopDispositions.clear(); 6774 BlockDispositions.clear(); 6775 UnsignedRanges.clear(); 6776 SignedRanges.clear(); 6777 ExprValueMap.clear(); 6778 HasRecMap.clear(); 6779 MinTrailingZerosCache.clear(); 6780 PredicatedSCEVRewrites.clear(); 6781 } 6782 6783 void ScalarEvolution::forgetLoop(const Loop *L) { 6784 // Drop any stored trip count value. 6785 auto RemoveLoopFromBackedgeMap = 6786 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6787 auto BTCPos = Map.find(L); 6788 if (BTCPos != Map.end()) { 6789 BTCPos->second.clear(); 6790 Map.erase(BTCPos); 6791 } 6792 }; 6793 6794 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6795 SmallVector<Instruction *, 32> Worklist; 6796 SmallPtrSet<Instruction *, 16> Visited; 6797 6798 // Iterate over all the loops and sub-loops to drop SCEV information. 6799 while (!LoopWorklist.empty()) { 6800 auto *CurrL = LoopWorklist.pop_back_val(); 6801 6802 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6803 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6804 6805 // Drop information about predicated SCEV rewrites for this loop. 6806 for (auto I = PredicatedSCEVRewrites.begin(); 6807 I != PredicatedSCEVRewrites.end();) { 6808 std::pair<const SCEV *, const Loop *> Entry = I->first; 6809 if (Entry.second == CurrL) 6810 PredicatedSCEVRewrites.erase(I++); 6811 else 6812 ++I; 6813 } 6814 6815 auto LoopUsersItr = LoopUsers.find(CurrL); 6816 if (LoopUsersItr != LoopUsers.end()) { 6817 for (auto *S : LoopUsersItr->second) 6818 forgetMemoizedResults(S); 6819 LoopUsers.erase(LoopUsersItr); 6820 } 6821 6822 // Drop information about expressions based on loop-header PHIs. 6823 PushLoopPHIs(CurrL, Worklist); 6824 6825 while (!Worklist.empty()) { 6826 Instruction *I = Worklist.pop_back_val(); 6827 if (!Visited.insert(I).second) 6828 continue; 6829 6830 ValueExprMapType::iterator It = 6831 ValueExprMap.find_as(static_cast<Value *>(I)); 6832 if (It != ValueExprMap.end()) { 6833 eraseValueFromMap(It->first); 6834 forgetMemoizedResults(It->second); 6835 if (PHINode *PN = dyn_cast<PHINode>(I)) 6836 ConstantEvolutionLoopExitValue.erase(PN); 6837 } 6838 6839 PushDefUseChildren(I, Worklist); 6840 } 6841 6842 LoopPropertiesCache.erase(CurrL); 6843 // Forget all contained loops too, to avoid dangling entries in the 6844 // ValuesAtScopes map. 6845 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6846 } 6847 } 6848 6849 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6850 while (Loop *Parent = L->getParentLoop()) 6851 L = Parent; 6852 forgetLoop(L); 6853 } 6854 6855 void ScalarEvolution::forgetValue(Value *V) { 6856 Instruction *I = dyn_cast<Instruction>(V); 6857 if (!I) return; 6858 6859 // Drop information about expressions based on loop-header PHIs. 
6860 SmallVector<Instruction *, 16> Worklist; 6861 Worklist.push_back(I); 6862 6863 SmallPtrSet<Instruction *, 8> Visited; 6864 while (!Worklist.empty()) { 6865 I = Worklist.pop_back_val(); 6866 if (!Visited.insert(I).second) 6867 continue; 6868 6869 ValueExprMapType::iterator It = 6870 ValueExprMap.find_as(static_cast<Value *>(I)); 6871 if (It != ValueExprMap.end()) { 6872 eraseValueFromMap(It->first); 6873 forgetMemoizedResults(It->second); 6874 if (PHINode *PN = dyn_cast<PHINode>(I)) 6875 ConstantEvolutionLoopExitValue.erase(PN); 6876 } 6877 6878 PushDefUseChildren(I, Worklist); 6879 } 6880 } 6881 6882 /// Get the exact loop backedge taken count considering all loop exits. A 6883 /// computable result can only be returned for loops with all exiting blocks 6884 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6885 /// is never skipped. This is a valid assumption as long as the loop exits via 6886 /// that test. For precise results, it is the caller's responsibility to specify 6887 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6888 const SCEV * 6889 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6890 SCEVUnionPredicate *Preds) const { 6891 // If any exits were not computable, the loop is not computable. 6892 if (!isComplete() || ExitNotTaken.empty()) 6893 return SE->getCouldNotCompute(); 6894 6895 const BasicBlock *Latch = L->getLoopLatch(); 6896 // All exiting blocks we have collected must dominate the only backedge. 6897 if (!Latch) 6898 return SE->getCouldNotCompute(); 6899 6900 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6901 // count is simply a minimum out of all these calculated exit counts. 6902 SmallVector<const SCEV *, 2> Ops; 6903 for (auto &ENT : ExitNotTaken) { 6904 const SCEV *BECount = ENT.ExactNotTaken; 6905 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6906 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6907 "We should only have known counts for exiting blocks that dominate " 6908 "latch!"); 6909 6910 Ops.push_back(BECount); 6911 6912 if (Preds && !ENT.hasAlwaysTruePredicate()) 6913 Preds->add(ENT.Predicate.get()); 6914 6915 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6916 "Predicate should be always true!"); 6917 } 6918 6919 return SE->getUMinFromMismatchedTypes(Ops); 6920 } 6921 6922 /// Get the exact not taken count for this loop exit. 6923 const SCEV * 6924 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6925 ScalarEvolution *SE) const { 6926 for (auto &ENT : ExitNotTaken) 6927 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6928 return ENT.ExactNotTaken; 6929 6930 return SE->getCouldNotCompute(); 6931 } 6932 6933 /// getMax - Get the max backedge taken count for the loop. 
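/// For example (illustrative): for `for (unsigned i = 0; i != n; ++i)` the
/// exact count is n, while the max is the largest constant of n's type,
/// since a useful max must be a constant (or CouldNotCompute).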
6934 const SCEV * 6935 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6936 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6937 return !ENT.hasAlwaysTruePredicate(); 6938 }; 6939 6940 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6941 return SE->getCouldNotCompute(); 6942 6943 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6944 "No point in having a non-constant max backedge taken count!"); 6945 return getMax(); 6946 } 6947 6948 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6949 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6950 return !ENT.hasAlwaysTruePredicate(); 6951 }; 6952 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6953 } 6954 6955 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6956 ScalarEvolution *SE) const { 6957 if (getMax() && getMax() != SE->getCouldNotCompute() && 6958 SE->hasOperand(getMax(), S)) 6959 return true; 6960 6961 for (auto &ENT : ExitNotTaken) 6962 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6963 SE->hasOperand(ENT.ExactNotTaken, S)) 6964 return true; 6965 6966 return false; 6967 } 6968 6969 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6970 : ExactNotTaken(E), MaxNotTaken(E) { 6971 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6972 isa<SCEVConstant>(MaxNotTaken)) && 6973 "No point in having a non-constant max backedge taken count!"); 6974 } 6975 6976 ScalarEvolution::ExitLimit::ExitLimit( 6977 const SCEV *E, const SCEV *M, bool MaxOrZero, 6978 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 6979 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 6980 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 6981 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 6982 "Exact is not allowed to be less precise than Max"); 6983 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6984 isa<SCEVConstant>(MaxNotTaken)) && 6985 "No point in having a non-constant max backedge taken count!"); 6986 for (auto *PredSet : PredSetList) 6987 for (auto *P : *PredSet) 6988 addPredicate(P); 6989 } 6990 6991 ScalarEvolution::ExitLimit::ExitLimit( 6992 const SCEV *E, const SCEV *M, bool MaxOrZero, 6993 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 6994 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 6995 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6996 isa<SCEVConstant>(MaxNotTaken)) && 6997 "No point in having a non-constant max backedge taken count!"); 6998 } 6999 7000 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7001 bool MaxOrZero) 7002 : ExitLimit(E, M, MaxOrZero, None) { 7003 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7004 isa<SCEVConstant>(MaxNotTaken)) && 7005 "No point in having a non-constant max backedge taken count!"); 7006 } 7007 7008 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7009 /// computable exit into a persistent ExitNotTakenInfo array. 
7010 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 7011 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> 7012 ExitCounts, 7013 bool Complete, const SCEV *MaxCount, bool MaxOrZero) 7014 : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { 7015 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7016 7017 ExitNotTaken.reserve(ExitCounts.size()); 7018 std::transform( 7019 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 7020 [&](const EdgeExitInfo &EEI) { 7021 BasicBlock *ExitBB = EEI.first; 7022 const ExitLimit &EL = EEI.second; 7023 if (EL.Predicates.empty()) 7024 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr); 7025 7026 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate); 7027 for (auto *Pred : EL.Predicates) 7028 Predicate->add(Pred); 7029 7030 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate)); 7031 }); 7032 assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) && 7033 "No point in having a non-constant max backedge taken count!"); 7034 } 7035 7036 /// Invalidate this result and free the ExitNotTakenInfo array. 7037 void ScalarEvolution::BackedgeTakenInfo::clear() { 7038 ExitNotTaken.clear(); 7039 } 7040 7041 /// Compute the number of times the backedge of the specified loop will execute. 7042 ScalarEvolution::BackedgeTakenInfo 7043 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 7044 bool AllowPredicates) { 7045 SmallVector<BasicBlock *, 8> ExitingBlocks; 7046 L->getExitingBlocks(ExitingBlocks); 7047 7048 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 7049 7050 SmallVector<EdgeExitInfo, 4> ExitCounts; 7051 bool CouldComputeBECount = true; 7052 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 7053 const SCEV *MustExitMaxBECount = nullptr; 7054 const SCEV *MayExitMaxBECount = nullptr; 7055 bool MustExitMaxOrZero = false; 7056 7057 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 7058 // and compute maxBECount. 7059 // Do a union of all the predicates here. 7060 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 7061 BasicBlock *ExitBB = ExitingBlocks[i]; 7062 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 7063 7064 assert((AllowPredicates || EL.Predicates.empty()) && 7065 "Predicated exit limit when predicates are not allowed!"); 7066 7067 // 1. For each exit that can be computed, add an entry to ExitCounts. 7068 // CouldComputeBECount is true only if all exits can be computed. 7069 if (EL.ExactNotTaken == getCouldNotCompute()) 7070 // We couldn't compute an exact value for this exit, so 7071 // we won't be able to compute an exact value for the loop. 7072 CouldComputeBECount = false; 7073 else 7074 ExitCounts.emplace_back(ExitBB, EL); 7075 7076 // 2. Derive the loop's MaxBECount from each exit's max number of 7077 // non-exiting iterations. Partition the loop exits into two kinds: 7078 // LoopMustExits and LoopMayExits. 7079 // 7080 // If the exit dominates the loop latch, it is a LoopMustExit otherwise it 7081 // is a LoopMayExit. If any computable LoopMustExit is found, then 7082 // MaxBECount is the minimum EL.MaxNotTaken of computable 7083 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 7084 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 7085 // computable EL.MaxNotTaken. 
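    // Illustrative example: two computable must-exits with maxes 10 and 20
    // give MaxBECount = umin(10, 20) = 10. If only may-exits with maxes 10
    // and 100 are seen, the conservative answer is umax = 100, and a single
    // non-computable may-exit pins the result to CouldNotCompute.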
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
7142 return getCouldNotCompute(); 7143 Exit = SBB; 7144 } 7145 assert(Exit && "Exiting block must have at least one exit"); 7146 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7147 /*ControlsExit=*/IsOnlyExit); 7148 } 7149 7150 return getCouldNotCompute(); 7151 } 7152 7153 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7154 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7155 bool ControlsExit, bool AllowPredicates) { 7156 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7157 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7158 ControlsExit, AllowPredicates); 7159 } 7160 7161 Optional<ScalarEvolution::ExitLimit> 7162 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7163 bool ExitIfTrue, bool ControlsExit, 7164 bool AllowPredicates) { 7165 (void)this->L; 7166 (void)this->ExitIfTrue; 7167 (void)this->AllowPredicates; 7168 7169 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7170 this->AllowPredicates == AllowPredicates && 7171 "Variance in assumed invariant key components!"); 7172 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7173 if (Itr == TripCountMap.end()) 7174 return None; 7175 return Itr->second; 7176 } 7177 7178 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7179 bool ExitIfTrue, 7180 bool ControlsExit, 7181 bool AllowPredicates, 7182 const ExitLimit &EL) { 7183 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7184 this->AllowPredicates == AllowPredicates && 7185 "Variance in assumed invariant key components!"); 7186 7187 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7188 assert(InsertResult.second && "Expected successful insertion!"); 7189 (void)InsertResult; 7190 (void)ExitIfTrue; 7191 } 7192 7193 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7194 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7195 bool ControlsExit, bool AllowPredicates) { 7196 7197 if (auto MaybeEL = 7198 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7199 return *MaybeEL; 7200 7201 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7202 ControlsExit, AllowPredicates); 7203 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7204 return EL; 7205 } 7206 7207 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7208 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7209 bool ControlsExit, bool AllowPredicates) { 7210 // Check if the controlling expression for this loop is an And or Or. 7211 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7212 if (BO->getOpcode() == Instruction::And) { 7213 // Recurse on the operands of the and. 7214 bool EitherMayExit = !ExitIfTrue; 7215 ExitLimit EL0 = computeExitLimitFromCondCached( 7216 Cache, L, BO->getOperand(0), ExitIfTrue, 7217 ControlsExit && !EitherMayExit, AllowPredicates); 7218 ExitLimit EL1 = computeExitLimitFromCondCached( 7219 Cache, L, BO->getOperand(1), ExitIfTrue, 7220 ControlsExit && !EitherMayExit, AllowPredicates); 7221 const SCEV *BECount = getCouldNotCompute(); 7222 const SCEV *MaxBECount = getCouldNotCompute(); 7223 if (EitherMayExit) { 7224 // Both conditions must be true for the loop to continue executing. 7225 // Choose the less conservative count. 
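        // Illustrative example: for a latch branch
        //   br i1 (and (icmp ne %i, %n), (icmp ne %i, %m)), %loop, %exit
        // the loop keeps running only while both compares hold, so the exact
        // count is the umin of the two operands' counts when both are
        // computable.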
7226 if (EL0.ExactNotTaken == getCouldNotCompute() || 7227 EL1.ExactNotTaken == getCouldNotCompute()) 7228 BECount = getCouldNotCompute(); 7229 else 7230 BECount = 7231 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7232 if (EL0.MaxNotTaken == getCouldNotCompute()) 7233 MaxBECount = EL1.MaxNotTaken; 7234 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7235 MaxBECount = EL0.MaxNotTaken; 7236 else 7237 MaxBECount = 7238 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7239 } else { 7240 // Both conditions must be true at the same time for the loop to exit. 7241 // For now, be conservative. 7242 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7243 MaxBECount = EL0.MaxNotTaken; 7244 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7245 BECount = EL0.ExactNotTaken; 7246 } 7247 7248 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7249 // to be more aggressive when computing BECount than when computing 7250 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7251 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7252 // to not. 7253 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7254 !isa<SCEVCouldNotCompute>(BECount)) 7255 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7256 7257 return ExitLimit(BECount, MaxBECount, false, 7258 {&EL0.Predicates, &EL1.Predicates}); 7259 } 7260 if (BO->getOpcode() == Instruction::Or) { 7261 // Recurse on the operands of the or. 7262 bool EitherMayExit = ExitIfTrue; 7263 ExitLimit EL0 = computeExitLimitFromCondCached( 7264 Cache, L, BO->getOperand(0), ExitIfTrue, 7265 ControlsExit && !EitherMayExit, AllowPredicates); 7266 ExitLimit EL1 = computeExitLimitFromCondCached( 7267 Cache, L, BO->getOperand(1), ExitIfTrue, 7268 ControlsExit && !EitherMayExit, AllowPredicates); 7269 const SCEV *BECount = getCouldNotCompute(); 7270 const SCEV *MaxBECount = getCouldNotCompute(); 7271 if (EitherMayExit) { 7272 // Both conditions must be false for the loop to continue executing. 7273 // Choose the less conservative count. 7274 if (EL0.ExactNotTaken == getCouldNotCompute() || 7275 EL1.ExactNotTaken == getCouldNotCompute()) 7276 BECount = getCouldNotCompute(); 7277 else 7278 BECount = 7279 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7280 if (EL0.MaxNotTaken == getCouldNotCompute()) 7281 MaxBECount = EL1.MaxNotTaken; 7282 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7283 MaxBECount = EL0.MaxNotTaken; 7284 else 7285 MaxBECount = 7286 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7287 } else { 7288 // Both conditions must be false at the same time for the loop to exit. 7289 // For now, be conservative. 7290 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7291 MaxBECount = EL0.MaxNotTaken; 7292 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7293 BECount = EL0.ExactNotTaken; 7294 } 7295 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7296 // to be more aggressive when computing BECount than when computing 7297 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7298 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7299 // to not. 7300 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7301 !isa<SCEVCouldNotCompute>(BECount)) 7302 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7303 7304 return ExitLimit(BECount, MaxBECount, false, 7305 {&EL0.Predicates, &EL1.Predicates}); 7306 } 7307 } 7308 7309 // With an icmp, it may be feasible to compute an exact backedge-taken count. 
7310 // Proceed to the next level to examine the icmp. 7311 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7312 ExitLimit EL = 7313 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7314 if (EL.hasFullInfo() || !AllowPredicates) 7315 return EL; 7316 7317 // Try again, but use SCEV predicates this time. 7318 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7319 /*AllowPredicates=*/true); 7320 } 7321 7322 // Check for a constant condition. These are normally stripped out by 7323 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7324 // preserve the CFG and is temporarily leaving constant conditions 7325 // in place. 7326 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7327 if (ExitIfTrue == !CI->getZExtValue()) 7328 // The backedge is always taken. 7329 return getCouldNotCompute(); 7330 else 7331 // The backedge is never taken. 7332 return getZero(CI->getType()); 7333 } 7334 7335 // If it's not an integer or pointer comparison then compute it the hard way. 7336 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7337 } 7338 7339 ScalarEvolution::ExitLimit 7340 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7341 ICmpInst *ExitCond, 7342 bool ExitIfTrue, 7343 bool ControlsExit, 7344 bool AllowPredicates) { 7345 // If the condition was exit on true, convert the condition to exit on false 7346 ICmpInst::Predicate Pred; 7347 if (!ExitIfTrue) 7348 Pred = ExitCond->getPredicate(); 7349 else 7350 Pred = ExitCond->getInversePredicate(); 7351 const ICmpInst::Predicate OriginalPred = Pred; 7352 7353 // Handle common loops like: for (X = "string"; *X; ++X) 7354 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7355 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7356 ExitLimit ItCnt = 7357 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7358 if (ItCnt.hasAnyInfo()) 7359 return ItCnt; 7360 } 7361 7362 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7363 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7364 7365 // Try to evaluate any dependencies out of the loop. 7366 LHS = getSCEVAtScope(LHS, L); 7367 RHS = getSCEVAtScope(RHS, L); 7368 7369 // At this point, we would like to compute how many iterations of the 7370 // loop the predicate will return true for these inputs. 7371 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7372 // If there is a loop-invariant, force it into the RHS. 7373 std::swap(LHS, RHS); 7374 Pred = ICmpInst::getSwappedPredicate(Pred); 7375 } 7376 7377 // Simplify the operands before analyzing them. 7378 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7379 7380 // If we have a comparison of a chrec against a constant, try to use value 7381 // ranges to answer this query. 7382 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7383 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7384 if (AddRec->getLoop() == L) { 7385 // Form the constant range. 
7386 ConstantRange CompRange = 7387 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7388 7389 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7390 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7391 } 7392 7393 switch (Pred) { 7394 case ICmpInst::ICMP_NE: { // while (X != Y) 7395 // Convert to: while (X-Y != 0) 7396 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7397 AllowPredicates); 7398 if (EL.hasAnyInfo()) return EL; 7399 break; 7400 } 7401 case ICmpInst::ICMP_EQ: { // while (X == Y) 7402 // Convert to: while (X-Y == 0) 7403 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7404 if (EL.hasAnyInfo()) return EL; 7405 break; 7406 } 7407 case ICmpInst::ICMP_SLT: 7408 case ICmpInst::ICMP_ULT: { // while (X < Y) 7409 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7410 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7411 AllowPredicates); 7412 if (EL.hasAnyInfo()) return EL; 7413 break; 7414 } 7415 case ICmpInst::ICMP_SGT: 7416 case ICmpInst::ICMP_UGT: { // while (X > Y) 7417 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7418 ExitLimit EL = 7419 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7420 AllowPredicates); 7421 if (EL.hasAnyInfo()) return EL; 7422 break; 7423 } 7424 default: 7425 break; 7426 } 7427 7428 auto *ExhaustiveCount = 7429 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7430 7431 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7432 return ExhaustiveCount; 7433 7434 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7435 ExitCond->getOperand(1), L, OriginalPred); 7436 } 7437 7438 ScalarEvolution::ExitLimit 7439 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7440 SwitchInst *Switch, 7441 BasicBlock *ExitingBlock, 7442 bool ControlsExit) { 7443 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7444 7445 // Give up if the exit is the default dest of a switch. 7446 if (Switch->getDefaultDest() == ExitingBlock) 7447 return getCouldNotCompute(); 7448 7449 assert(L->contains(Switch->getDefaultDest()) && 7450 "Default case must not exit the loop!"); 7451 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7452 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7453 7454 // while (X != Y) --> while (X-Y != 0) 7455 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7456 if (EL.hasAnyInfo()) 7457 return EL; 7458 7459 return getCouldNotCompute(); 7460 } 7461 7462 static ConstantInt * 7463 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7464 ScalarEvolution &SE) { 7465 const SCEV *InVal = SE.getConstant(C); 7466 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7467 assert(isa<SCEVConstant>(Val) && 7468 "Evaluation of SCEV at constant didn't fold correctly?"); 7469 return cast<SCEVConstant>(Val)->getValue(); 7470 } 7471 7472 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7473 /// compute the backedge execution count. 7474 ScalarEvolution::ExitLimit 7475 ScalarEvolution::computeLoadConstantCompareExitLimit( 7476 LoadInst *LI, 7477 Constant *RHS, 7478 const Loop *L, 7479 ICmpInst::Predicate predicate) { 7480 if (LI->isVolatile()) return getCouldNotCompute(); 7481 7482 // Check to see if the loaded pointer is a getelementptr of a global. 7483 // TODO: Use SCEV instead of manually grubbing with GEPs. 
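  // Illustrative example: for `for (i = 0; table[i] != 0; ++i)` with `table`
  // a constant global array, the code below folds the load at i = 0, 1, 2,
  // ... and returns the first iteration whose comparison settles the exit.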
7484   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
7485   if (!GEP) return getCouldNotCompute();
7486
7487   // Make sure that it is really a constant global we are gepping, with an
7488   // initializer, and make sure the first IDX is really 0.
7489   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
7490   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
7491       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
7492       !cast<Constant>(GEP->getOperand(1))->isNullValue())
7493     return getCouldNotCompute();
7494
7495   // Okay, we allow one non-constant index into the GEP instruction.
7496   Value *VarIdx = nullptr;
7497   std::vector<Constant*> Indexes;
7498   unsigned VarIdxNum = 0;
7499   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
7500     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
7501       Indexes.push_back(CI);
7502     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
7503       if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
7504       VarIdx = GEP->getOperand(i);
7505       VarIdxNum = i-2;
7506       Indexes.push_back(nullptr);
7507     }
7508
7509   // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
7510   if (!VarIdx)
7511     return getCouldNotCompute();
7512
7513   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
7514   // Check to see if X is a loop-variant value now.
7515   const SCEV *Idx = getSCEV(VarIdx);
7516   Idx = getSCEVAtScope(Idx, L);
7517
7518   // We can only recognize very limited forms of loop index expressions, in
7519   // particular, only affine AddRec's like {C1,+,C2}.
7520   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
7521   if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
7522       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
7523       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
7524     return getCouldNotCompute();
7525
7526   unsigned MaxSteps = MaxBruteForceIterations;
7527   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
7528     ConstantInt *ItCst = ConstantInt::get(
7529         cast<IntegerType>(IdxExpr->getType()), IterationNum);
7530     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
7531
7532     // Form the GEP offset.
7533     Indexes[VarIdxNum] = Val;
7534
7535     Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
7536                                                          Indexes);
7537     if (!Result) break;  // Cannot compute!
7538
7539     // Evaluate the condition for this iteration.
7540     Result = ConstantExpr::getICmp(predicate, Result, RHS);
7541     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure.
7542     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
7543       ++NumArrayLenItCounts;
7544       return getConstant(ItCst);  // Found terminating iteration!
7545     }
7546   }
7547   return getCouldNotCompute();
7548 }
7549
7550 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
7551     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
7552   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
7553   if (!RHS)
7554     return getCouldNotCompute();
7555
7556   const BasicBlock *Latch = L->getLoopLatch();
7557   if (!Latch)
7558     return getCouldNotCompute();
7559
7560   const BasicBlock *Predecessor = L->getLoopPredecessor();
7561   if (!Predecessor)
7562     return getCouldNotCompute();
7563
7564   // Return true if V is of the form "LHS `shift_op` <positive constant>".
7565   // Return LHS in OutLHS and shift_op in OutOpCode.
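  // For example (illustrative): V = "lshr i32 %x, 3" matches with
  // OutLHS = %x and OutOpCode = Instruction::LShr, while a shift by zero is
  // rejected because the shift amount must be strictly positive.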
7566   auto MatchPositiveShift =
7567       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
7568
7569     using namespace PatternMatch;
7570
7571     ConstantInt *ShiftAmt;
7572     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7573       OutOpCode = Instruction::LShr;
7574     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7575       OutOpCode = Instruction::AShr;
7576     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
7577       OutOpCode = Instruction::Shl;
7578     else
7579       return false;
7580
7581     return ShiftAmt->getValue().isStrictlyPositive();
7582   };
7583
7584   // Recognize a "shift recurrence", i.e. a value of the form %iv or %iv.shifted in
7585   //
7586   // loop:
7587   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
7588   //   %iv.shifted = lshr i32 %iv, <positive constant>
7589   //
7590   // Return true on a successful match. Return the corresponding PHI node (%iv
7591   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
7592   auto MatchShiftRecurrence =
7593       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
7594     Optional<Instruction::BinaryOps> PostShiftOpCode;
7595
7596     {
7597       Instruction::BinaryOps OpC;
7598       Value *V;
7599
7600       // If we encounter a shift instruction, "peel off" the shift operation,
7601       // and remember that we did so. Later when we inspect %iv's backedge
7602       // value, we will make sure that the backedge value uses the same
7603       // operation.
7604       //
7605       // Note: the peeled shift operation does not have to be the same
7606       // instruction as the one feeding into the PHI's backedge value. We only
7607       // really care about it being the same *kind* of shift instruction --
7608       // that's all that is required for our later inferences to hold.
7609       if (MatchPositiveShift(LHS, V, OpC)) {
7610         PostShiftOpCode = OpC;
7611         LHS = V;
7612       }
7613     }
7614
7615     PNOut = dyn_cast<PHINode>(LHS);
7616     if (!PNOut || PNOut->getParent() != L->getHeader())
7617       return false;
7618
7619     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
7620     Value *OpLHS;
7621
7622     return
7623         // The backedge value for the PHI node must be a shift by a positive
7624         // amount
7625         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
7626
7627         // of the PHI node itself
7628         OpLHS == PNOut &&
7629
7630         // and the kind of shift should match the kind of shift we peeled
7631         // off, if any.
7632         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
7633   };
7634
7635   PHINode *PN;
7636   Instruction::BinaryOps OpCode;
7637   if (!MatchShiftRecurrence(LHS, PN, OpCode))
7638     return getCouldNotCompute();
7639
7640   const DataLayout &DL = getDataLayout();
7641
7642   // The key rationale for this optimization is that for some kinds of shift
7643   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
7644   // within a finite number of iterations. If the condition guarding the
7645   // backedge (in the sense that the backedge is taken if the condition is true)
7646   // is false for the value the shift recurrence stabilizes to, then we know
7647   // that the backedge is taken only a finite number of times.
7648
7649   ConstantInt *StableValue = nullptr;
7650   switch (OpCode) {
7651   default:
7652     llvm_unreachable("Impossible case!");
7653
7654   case Instruction::AShr: {
7655     // {K,ashr,<positive-constant>} stabilizes to 0 (if K >= 0) or to -1
7656     // (if K < 0) in at most bitwidth(K) iterations.
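    // Worked example (illustrative, i8): K = -20 = 0b11101100 steps through
    // 0b11110110 (-10), 0b11111011 (-5), ... and reaches -1 = 0b11111111
    // within at most 8 shifts, after which it no longer changes.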
7657 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7658 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7659 Predecessor->getTerminator(), &DT); 7660 auto *Ty = cast<IntegerType>(RHS->getType()); 7661 if (Known.isNonNegative()) 7662 StableValue = ConstantInt::get(Ty, 0); 7663 else if (Known.isNegative()) 7664 StableValue = ConstantInt::get(Ty, -1, true); 7665 else 7666 return getCouldNotCompute(); 7667 7668 break; 7669 } 7670 case Instruction::LShr: 7671 case Instruction::Shl: 7672 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7673 // stabilize to 0 in at most bitwidth(K) iterations. 7674 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7675 break; 7676 } 7677 7678 auto *Result = 7679 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7680 assert(Result->getType()->isIntegerTy(1) && 7681 "Otherwise cannot be an operand to a branch instruction"); 7682 7683 if (Result->isZeroValue()) { 7684 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7685 const SCEV *UpperBound = 7686 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7687 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7688 } 7689 7690 return getCouldNotCompute(); 7691 } 7692 7693 /// Return true if we can constant fold an instruction of the specified type, 7694 /// assuming that all operands were constants. 7695 static bool CanConstantFold(const Instruction *I) { 7696 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7697 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7698 isa<LoadInst>(I)) 7699 return true; 7700 7701 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7702 if (const Function *F = CI->getCalledFunction()) 7703 return canConstantFoldCallTo(CI, F); 7704 return false; 7705 } 7706 7707 /// Determine whether this instruction can constant evolve within this loop 7708 /// assuming its operands can all constant evolve. 7709 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7710 // An instruction outside of the loop can't be derived from a loop PHI. 7711 if (!L->contains(I)) return false; 7712 7713 if (isa<PHINode>(I)) { 7714 // We don't currently keep track of the control flow needed to evaluate 7715 // PHIs, so we cannot handle PHIs inside of loops. 7716 return L->getHeader() == I->getParent(); 7717 } 7718 7719 // If we won't be able to constant fold this expression even if the operands 7720 // are constants, bail early. 7721 return CanConstantFold(I); 7722 } 7723 7724 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7725 /// recursing through each instruction operand until reaching a loop header phi. 7726 static PHINode * 7727 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7728 DenseMap<Instruction *, PHINode *> &PHIMap, 7729 unsigned Depth) { 7730 if (Depth > MaxConstantEvolvingDepth) 7731 return nullptr; 7732 7733 // Otherwise, we can evaluate this instruction if all of its operands are 7734 // constant or derived from a PHI node themselves. 7735 PHINode *PHI = nullptr; 7736 for (Value *Op : UseInst->operands()) { 7737 if (isa<Constant>(Op)) continue; 7738 7739 Instruction *OpInst = dyn_cast<Instruction>(Op); 7740 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7741 7742 PHINode *P = dyn_cast<PHINode>(OpInst); 7743 if (!P) 7744 // If this operand is already visited, reuse the prior result. 7745 // We may have P != PHI if this is the deepest point at which the 7746 // inconsistent paths meet. 
7747       P = PHIMap.lookup(OpInst);
7748     if (!P) {
7749       // Recurse and memoize the results, whether a phi is found or not.
7750       // This recursive call invalidates pointers into PHIMap.
7751       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
7752       PHIMap[OpInst] = P;
7753     }
7754     if (!P)
7755       return nullptr; // Not evolving from PHI
7756     if (PHI && PHI != P)
7757       return nullptr; // Evolving from multiple different PHIs.
7758     PHI = P;
7759   }
7760   // This is an expression evolving from a constant PHI!
7761   return PHI;
7762 }
7763
7764 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
7765 /// in the loop that V is derived from. We allow arbitrary operations along the
7766 /// way, but the operands of an operation must either be constants or a value
7767 /// derived from a constant PHI. If this expression does not fit with these
7768 /// constraints, return null.
7769 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
7770   Instruction *I = dyn_cast<Instruction>(V);
7771   if (!I || !canConstantEvolve(I, L)) return nullptr;
7772
7773   if (PHINode *PN = dyn_cast<PHINode>(I))
7774     return PN;
7775
7776   // Record non-constant instructions contained by the loop.
7777   DenseMap<Instruction *, PHINode *> PHIMap;
7778   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
7779 }
7780
7781 /// EvaluateExpression - Given an expression that passes the
7782 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
7783 /// nodes in the loop have the constant values given in Vals. If we can't
7784 /// fold this expression for some reason, return null.
7785 static Constant *EvaluateExpression(Value *V, const Loop *L,
7786                                     DenseMap<Instruction *, Constant *> &Vals,
7787                                     const DataLayout &DL,
7788                                     const TargetLibraryInfo *TLI) {
7789   // Convenient constant check, but redundant for recursive calls.
7790   if (Constant *C = dyn_cast<Constant>(V)) return C;
7791   Instruction *I = dyn_cast<Instruction>(V);
7792   if (!I) return nullptr;
7793
7794   if (Constant *C = Vals.lookup(I)) return C;
7795
7796   // An instruction inside the loop depends on a value outside the loop that we
7797   // weren't given a mapping for, or a value such as a call inside the loop.
7798   if (!canConstantEvolve(I, L)) return nullptr;
7799
7800   // An unmapped PHI can be due to a branch or another loop inside this loop,
7801   // or due to this not being the initial iteration through a loop where we
7802   // couldn't compute the evolution of this particular PHI last time.
7803   if (isa<PHINode>(I)) return nullptr;
7804
7805   std::vector<Constant*> Operands(I->getNumOperands());
7806
7807   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
7808     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
7809     if (!Operand) {
7810       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
7811       if (!Operands[i]) return nullptr;
7812       continue;
7813     }
7814     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
7815     Vals[Operand] = C;
7816     if (!C) return nullptr;
7817     Operands[i] = C;
7818   }
7819
7820   if (CmpInst *CI = dyn_cast<CmpInst>(I))
7821     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
7822                                            Operands[1], DL, TLI);
7823   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
7824     if (!LI->isVolatile())
7825       return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
7826   }
7827   return ConstantFoldInstOperands(I, Operands, DL, TLI);
7828 }
7829
7830
7831 // If every incoming value to PN except the one for BB is a specific Constant,
7832 // return that, else return nullptr.
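// For example (illustrative):
//
//   %pn = phi i32 [ 5, %a ], [ 5, %b ], [ %x, %latch ]
//
// with BB = %latch returns i32 5; if the two non-latch values disagreed, or
// either were non-constant, the result would be nullptr.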
7833 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7834 Constant *IncomingVal = nullptr; 7835 7836 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7837 if (PN->getIncomingBlock(i) == BB) 7838 continue; 7839 7840 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7841 if (!CurrentVal) 7842 return nullptr; 7843 7844 if (IncomingVal != CurrentVal) { 7845 if (IncomingVal) 7846 return nullptr; 7847 IncomingVal = CurrentVal; 7848 } 7849 } 7850 7851 return IncomingVal; 7852 } 7853 7854 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7855 /// in the header of its containing loop, we know the loop executes a 7856 /// constant number of times, and the PHI node is just a recurrence 7857 /// involving constants, fold it. 7858 Constant * 7859 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7860 const APInt &BEs, 7861 const Loop *L) { 7862 auto I = ConstantEvolutionLoopExitValue.find(PN); 7863 if (I != ConstantEvolutionLoopExitValue.end()) 7864 return I->second; 7865 7866 if (BEs.ugt(MaxBruteForceIterations)) 7867 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7868 7869 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7870 7871 DenseMap<Instruction *, Constant *> CurrentIterVals; 7872 BasicBlock *Header = L->getHeader(); 7873 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7874 7875 BasicBlock *Latch = L->getLoopLatch(); 7876 if (!Latch) 7877 return nullptr; 7878 7879 for (PHINode &PHI : Header->phis()) { 7880 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7881 CurrentIterVals[&PHI] = StartCST; 7882 } 7883 if (!CurrentIterVals.count(PN)) 7884 return RetVal = nullptr; 7885 7886 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7887 7888 // Execute the loop symbolically to determine the exit value. 7889 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7890 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7891 7892 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7893 unsigned IterationNum = 0; 7894 const DataLayout &DL = getDataLayout(); 7895 for (; ; ++IterationNum) { 7896 if (IterationNum == NumIterations) 7897 return RetVal = CurrentIterVals[PN]; // Got exit value! 7898 7899 // Compute the value of the PHIs for the next iteration. 7900 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 7901 DenseMap<Instruction *, Constant *> NextIterVals; 7902 Constant *NextPHI = 7903 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7904 if (!NextPHI) 7905 return nullptr; // Couldn't evaluate! 7906 NextIterVals[PN] = NextPHI; 7907 7908 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7909 7910 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7911 // cease to be able to evaluate one of them or if they stop evolving, 7912 // because that doesn't necessarily prevent us from computing PN. 7913 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7914 for (const auto &I : CurrentIterVals) { 7915 PHINode *PHI = dyn_cast<PHINode>(I.first); 7916 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7917 PHIsToCompute.emplace_back(PHI, I.second); 7918 } 7919 // We use two distinct loops because EvaluateExpression may invalidate any 7920 // iterators into CurrentIterVals. 
7921     for (const auto &I : PHIsToCompute) {
7922       PHINode *PHI = I.first;
7923       Constant *&NextPHI = NextIterVals[PHI];
7924       if (!NextPHI) { // Not already computed.
7925         Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7926         NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
7927       }
7928       if (NextPHI != I.second)
7929         StoppedEvolving = false;
7930     }
7931
7932     // If all entries in CurrentIterVals == NextIterVals then we can stop
7933     // iterating; the loop can't continue to change.
7934     if (StoppedEvolving)
7935       return RetVal = CurrentIterVals[PN];
7936
7937     CurrentIterVals.swap(NextIterVals);
7938   }
7939 }
7940
7941 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
7942                                                           Value *Cond,
7943                                                           bool ExitWhen) {
7944   PHINode *PN = getConstantEvolvingPHI(Cond, L);
7945   if (!PN) return getCouldNotCompute();
7946
7947   // If the loop is canonicalized, the PHI will have exactly two entries.
7948   // That's the only form we support here.
7949   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
7950
7951   DenseMap<Instruction *, Constant *> CurrentIterVals;
7952   BasicBlock *Header = L->getHeader();
7953   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
7954
7955   BasicBlock *Latch = L->getLoopLatch();
7956   assert(Latch && "Should follow from NumIncomingValues == 2!");
7957
7958   for (PHINode &PHI : Header->phis()) {
7959     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
7960       CurrentIterVals[&PHI] = StartCST;
7961   }
7962   if (!CurrentIterVals.count(PN))
7963     return getCouldNotCompute();
7964
7965   // Okay, we found a PHI node that defines the trip count of this loop. Execute
7966   // the loop symbolically to determine when the condition gets a value of
7967   // "ExitWhen".
7968   unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
7969   const DataLayout &DL = getDataLayout();
7970   for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
7971     auto *CondVal = dyn_cast_or_null<ConstantInt>(
7972         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
7973
7974     // Couldn't symbolically evaluate.
7975     if (!CondVal) return getCouldNotCompute();
7976
7977     if (CondVal->getValue() == uint64_t(ExitWhen)) {
7978       ++NumBruteForceTripCountsComputed;
7979       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
7980     }
7981
7982     // Update all the PHI nodes for the next iteration.
7983     DenseMap<Instruction *, Constant *> NextIterVals;
7984
7985     // Create a list of which PHIs we need to compute. We want to do this before
7986     // calling EvaluateExpression on them because that may invalidate iterators
7987     // into CurrentIterVals.
7988     SmallVector<PHINode *, 8> PHIsToCompute;
7989     for (const auto &I : CurrentIterVals) {
7990       PHINode *PHI = dyn_cast<PHINode>(I.first);
7991       if (!PHI || PHI->getParent() != Header) continue;
7992       PHIsToCompute.push_back(PHI);
7993     }
7994     for (PHINode *PHI : PHIsToCompute) {
7995       Constant *&NextPHI = NextIterVals[PHI];
7996       if (NextPHI) continue; // Already computed!
7997
7998       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
7999       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8000     }
8001     CurrentIterVals.swap(NextIterVals);
8002   }
8003
8004   // Too many iterations were needed to evaluate.
8005 return getCouldNotCompute(); 8006 } 8007 8008 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8009 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8010 ValuesAtScopes[V]; 8011 // Check to see if we've folded this expression at this loop before. 8012 for (auto &LS : Values) 8013 if (LS.first == L) 8014 return LS.second ? LS.second : V; 8015 8016 Values.emplace_back(L, nullptr); 8017 8018 // Otherwise compute it. 8019 const SCEV *C = computeSCEVAtScope(V, L); 8020 for (auto &LS : reverse(ValuesAtScopes[V])) 8021 if (LS.first == L) { 8022 LS.second = C; 8023 break; 8024 } 8025 return C; 8026 } 8027 8028 /// This builds up a Constant using the ConstantExpr interface. That way, we 8029 /// will return Constants for objects which aren't represented by a 8030 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8031 /// Returns NULL if the SCEV isn't representable as a Constant. 8032 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8033 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 8034 case scCouldNotCompute: 8035 case scAddRecExpr: 8036 break; 8037 case scConstant: 8038 return cast<SCEVConstant>(V)->getValue(); 8039 case scUnknown: 8040 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8041 case scSignExtend: { 8042 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8043 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8044 return ConstantExpr::getSExt(CastOp, SS->getType()); 8045 break; 8046 } 8047 case scZeroExtend: { 8048 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8049 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8050 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8051 break; 8052 } 8053 case scTruncate: { 8054 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8055 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8056 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8057 break; 8058 } 8059 case scAddExpr: { 8060 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8061 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8062 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8063 unsigned AS = PTy->getAddressSpace(); 8064 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8065 C = ConstantExpr::getBitCast(C, DestPtrTy); 8066 } 8067 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8068 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8069 if (!C2) return nullptr; 8070 8071 // First pointer! 8072 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8073 unsigned AS = C2->getType()->getPointerAddressSpace(); 8074 std::swap(C, C2); 8075 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8076 // The offsets have been converted to bytes. We can add bytes to an 8077 // i8* by GEP with the byte count in the first index. 8078 C = ConstantExpr::getBitCast(C, DestPtrTy); 8079 } 8080 8081 // Don't bother trying to sum two pointers. We probably can't 8082 // statically compute a load that results from it anyway. 
8083         if (C2->getType()->isPointerTy())
8084           return nullptr;
8085
8086         if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8087           if (PTy->getElementType()->isStructTy())
8088             C2 = ConstantExpr::getIntegerCast(
8089                 C2, Type::getInt32Ty(C->getContext()), true);
8090           C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8091         } else
8092           C = ConstantExpr::getAdd(C, C2);
8093       }
8094       return C;
8095     }
8096     break;
8097   }
8098   case scMulExpr: {
8099     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8100     if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8101       // Don't bother with pointers at all.
8102       if (C->getType()->isPointerTy()) return nullptr;
8103       for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8104         Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8105         if (!C2 || C2->getType()->isPointerTy()) return nullptr;
8106         C = ConstantExpr::getMul(C, C2);
8107       }
8108       return C;
8109     }
8110     break;
8111   }
8112   case scUDivExpr: {
8113     const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8114     if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8115       if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8116         if (LHS->getType() == RHS->getType())
8117           return ConstantExpr::getUDiv(LHS, RHS);
8118     break;
8119   }
8120   case scSMaxExpr:
8121   case scUMaxExpr:
8122     break; // TODO: smax, umax.
8123   }
8124   return nullptr;
8125 }
8126
8127 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8128   if (isa<SCEVConstant>(V)) return V;
8129
8130   // If this instruction is evolved from a constant-evolving PHI, compute the
8131   // exit value from the loop without using SCEVs.
8132   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8133     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8134       if (PHINode *PN = dyn_cast<PHINode>(I)) {
8135         const Loop *LI = this->LI[I->getParent()];
8136         // Looking for loop exit value.
8137         if (LI && LI->getParentLoop() == L &&
8138             PN->getParent() == LI->getHeader()) {
8139           // Okay, there is no closed form solution for the PHI node. Check
8140           // to see if the loop that contains it has a known backedge-taken
8141           // count. If so, we may be able to force computation of the exit
8142           // value.
8143           const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
8144           if (const SCEVConstant *BTCC =
8145                   dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8146
8147             // This trivial case can show up in some degenerate cases where
8148             // the incoming IR has not yet been fully simplified.
8149             if (BTCC->getValue()->isZero()) {
8150               Value *InitValue = nullptr;
8151               bool MultipleInitValues = false;
8152               for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8153                 if (!LI->contains(PN->getIncomingBlock(i))) {
8154                   if (!InitValue)
8155                     InitValue = PN->getIncomingValue(i);
8156                   else if (InitValue != PN->getIncomingValue(i)) {
8157                     MultipleInitValues = true;
8158                     break;
8159                   }
8160                 }
8161               }
8162               if (!MultipleInitValues && InitValue)
8163                 return getSCEV(InitValue);
8164             }
8165             // Okay, we know how many times the containing loop executes. If
8166             // this is a constant evolving PHI node, get the final value at
8167             // the specified iteration number.
8168             Constant *RV =
8169                 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
8170             if (RV) return getSCEV(RV);
8171           }
8172         }
8173       }
8174
8175       // Okay, this is an expression that we cannot symbolically evaluate
8176       // into a SCEV. Check to see if it's possible to symbolically evaluate
8177       // the arguments into constants, and if so, try to constant propagate the
8178       // result. This is particularly useful for computing loop exit values.
8179       if (CanConstantFold(I)) {
8180         SmallVector<Constant *, 4> Operands;
8181         bool MadeImprovement = false;
8182         for (Value *Op : I->operands()) {
8183           if (Constant *C = dyn_cast<Constant>(Op)) {
8184             Operands.push_back(C);
8185             continue;
8186           }
8187
8188           // If any operand is non-constant and of a type that SCEV cannot
8189           // analyze (neither integer nor pointer), don't even try to analyze
8190           // it with SCEV techniques.
8191           if (!isSCEVable(Op->getType()))
8192             return V;
8193
8194           const SCEV *OrigV = getSCEV(Op);
8195           const SCEV *OpV = getSCEVAtScope(OrigV, L);
8196           MadeImprovement |= OrigV != OpV;
8197
8198           Constant *C = BuildConstantFromSCEV(OpV);
8199           if (!C) return V;
8200           if (C->getType() != Op->getType())
8201             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8202                                                               Op->getType(),
8203                                                               false),
8204                                       C, Op->getType());
8205           Operands.push_back(C);
8206         }
8207
8208         // Check to see if getSCEVAtScope actually made an improvement.
8209         if (MadeImprovement) {
8210           Constant *C = nullptr;
8211           const DataLayout &DL = getDataLayout();
8212           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8213             C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8214                                                 Operands[1], DL, &TLI);
8215           else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
8216             if (!LI->isVolatile())
8217               C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8218           } else
8219             C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8220           if (!C) return V;
8221           return getSCEV(C);
8222         }
8223       }
8224     }
8225
8226     // This is some other type of SCEVUnknown, just return it.
8227     return V;
8228   }
8229
8230   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8231     // Avoid performing the look-up in the common case where the specified
8232     // expression has no loop-variant portions.
8233     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8234       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8235       if (OpAtScope != Comm->getOperand(i)) {
8236         // Okay, at least one of these operands is loop variant but might be
8237         // foldable. Build a new instance of the folded commutative expression.
8238         SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
8239                                             Comm->op_begin()+i);
8240         NewOps.push_back(OpAtScope);
8241
8242         for (++i; i != e; ++i) {
8243           OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8244           NewOps.push_back(OpAtScope);
8245         }
8246         if (isa<SCEVAddExpr>(Comm))
8247           return getAddExpr(NewOps);
8248         if (isa<SCEVMulExpr>(Comm))
8249           return getMulExpr(NewOps);
8250         if (isa<SCEVSMaxExpr>(Comm))
8251           return getSMaxExpr(NewOps);
8252         if (isa<SCEVUMaxExpr>(Comm))
8253           return getUMaxExpr(NewOps);
8254         llvm_unreachable("Unknown commutative SCEV type!");
8255       }
8256     }
8257     // If we got here, all operands are loop invariant.
8258     return Comm;
8259   }
8260
8261   if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8262     const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8263     const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8264     if (LHS == Div->getLHS() && RHS == Div->getRHS())
8265       return Div; // must be loop invariant
8266     return getUDivExpr(LHS, RHS);
8267   }
8268
8269   // If this is a loop recurrence for a loop that does not contain L, then we
8270   // are dealing with the final value computed by the loop.
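  // For example (illustrative): evaluating {0,+,1}<%inner> at the scope of a
  // loop that does not contain %inner yields the addrec evaluated at
  // %inner's backedge-taken count.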
8271 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8272 // First, attempt to evaluate each operand. 8273 // Avoid performing the look-up in the common case where the specified 8274 // expression has no loop-variant portions. 8275 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8276 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8277 if (OpAtScope == AddRec->getOperand(i)) 8278 continue; 8279 8280 // Okay, at least one of these operands is loop variant but might be 8281 // foldable. Build a new instance of the folded commutative expression. 8282 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8283 AddRec->op_begin()+i); 8284 NewOps.push_back(OpAtScope); 8285 for (++i; i != e; ++i) 8286 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8287 8288 const SCEV *FoldedRec = 8289 getAddRecExpr(NewOps, AddRec->getLoop(), 8290 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8291 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8292 // The addrec may be folded to a nonrecurrence, for example, if the 8293 // induction variable is multiplied by zero after constant folding. Go 8294 // ahead and return the folded value. 8295 if (!AddRec) 8296 return FoldedRec; 8297 break; 8298 } 8299 8300 // If the scope is outside the addrec's loop, evaluate it by using the 8301 // loop exit value of the addrec. 8302 if (!AddRec->getLoop()->contains(L)) { 8303 // To evaluate this recurrence, we need to know how many times the AddRec 8304 // loop iterates. Compute this now. 8305 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8306 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8307 8308 // Then, evaluate the AddRec. 8309 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8310 } 8311 8312 return AddRec; 8313 } 8314 8315 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8316 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8317 if (Op == Cast->getOperand()) 8318 return Cast; // must be loop invariant 8319 return getZeroExtendExpr(Op, Cast->getType()); 8320 } 8321 8322 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8323 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8324 if (Op == Cast->getOperand()) 8325 return Cast; // must be loop invariant 8326 return getSignExtendExpr(Op, Cast->getType()); 8327 } 8328 8329 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8330 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8331 if (Op == Cast->getOperand()) 8332 return Cast; // must be loop invariant 8333 return getTruncateExpr(Op, Cast->getType()); 8334 } 8335 8336 llvm_unreachable("Unknown SCEV type!"); 8337 } 8338 8339 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8340 return getSCEVAtScope(getSCEV(V), L); 8341 } 8342 8343 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8344 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8345 return stripInjectiveFunctions(ZExt->getOperand()); 8346 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8347 return stripInjectiveFunctions(SExt->getOperand()); 8348 return S; 8349 } 8350 8351 /// Finds the minimum unsigned root of the following equation: 8352 /// 8353 /// A * X = B (mod N) 8354 /// 8355 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8356 /// A and B isn't important. 8357 /// 8358 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 
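/// Worked example (illustrative, BW = 4, so N = 16): solve 6*X = 4 (mod 16).
/// Here D = gcd(6, 16) = 2 divides 4, so a solution exists; A/D = 3 has
/// multiplicative inverse 3 modulo N/D = 8, and the minimum unsigned root is
/// ((3 * 4) mod 16) / 2 = 6. Indeed, 6 * 6 = 36 = 2 * 16 + 4.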
8359 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8360 ScalarEvolution &SE) { 8361 uint32_t BW = A.getBitWidth(); 8362 assert(BW == SE.getTypeSizeInBits(B->getType())); 8363 assert(A != 0 && "A must be non-zero."); 8364 8365 // 1. D = gcd(A, N) 8366 // 8367 // The gcd of A and N may have only one prime factor: 2. The number of 8368 // trailing zeros in A is its multiplicity 8369 uint32_t Mult2 = A.countTrailingZeros(); 8370 // D = 2^Mult2 8371 8372 // 2. Check if B is divisible by D. 8373 // 8374 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8375 // is not less than multiplicity of this prime factor for D. 8376 if (SE.GetMinTrailingZeros(B) < Mult2) 8377 return SE.getCouldNotCompute(); 8378 8379 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8380 // modulo (N / D). 8381 // 8382 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8383 // (N / D) in general. The inverse itself always fits into BW bits, though, 8384 // so we immediately truncate it. 8385 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8386 APInt Mod(BW + 1, 0); 8387 Mod.setBit(BW - Mult2); // Mod = N / D 8388 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8389 8390 // 4. Compute the minimum unsigned root of the equation: 8391 // I * (B / D) mod (N / D) 8392 // To simplify the computation, we factor out the divide by D: 8393 // (I * B mod N) / D 8394 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8395 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8396 } 8397 8398 /// For a given quadratic addrec, generate coefficients of the corresponding 8399 /// quadratic equation, multiplied by a common value to ensure that they are 8400 /// integers. 8401 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8402 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8403 /// were multiplied by, and BitWidth is the bit width of the original addrec 8404 /// coefficients. 8405 /// This function returns None if the addrec coefficients are not compile- 8406 /// time constants. 8407 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8408 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8409 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8410 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8411 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8412 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8413 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8414 << *AddRec << '\n'); 8415 8416 // We currently can only solve this if the coefficients are constants. 8417 if (!LC || !MC || !NC) { 8418 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8419 return None; 8420 } 8421 8422 APInt L = LC->getAPInt(); 8423 APInt M = MC->getAPInt(); 8424 APInt N = NC->getAPInt(); 8425 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8426 8427 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8428 unsigned NewWidth = BitWidth + 1; 8429 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8430 << BitWidth << '\n'); 8431 // The sign-extension (as opposed to a zero-extension) here matches the 8432 // extension used in SolveQuadraticEquationWrap (with the same motivation). 
8433 N = N.sext(NewWidth); 8434 M = M.sext(NewWidth); 8435 L = L.sext(NewWidth); 8436 8437 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8438 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8439 // L+M, L+2M+N, L+3M+3N, ... 8440 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8441 // 8442 // The equation Acc = 0 is then 8443 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8444 // In a quadratic form it becomes: 8445 // N n^2 + (2M-N) n + 2L = 0. 8446 8447 APInt A = N; 8448 APInt B = 2 * M - A; 8449 APInt C = 2 * L; 8450 APInt T = APInt(NewWidth, 2); 8451 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8452 << "x + " << C << ", coeff bw: " << NewWidth 8453 << ", multiplied by " << T << '\n'); 8454 return std::make_tuple(A, B, C, T, BitWidth); 8455 } 8456 8457 /// Helper function to compare optional APInts: 8458 /// (a) if X and Y both exist, return min(X, Y), 8459 /// (b) if neither X nor Y exist, return None, 8460 /// (c) if exactly one of X and Y exists, return that value. 8461 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8462 if (X.hasValue() && Y.hasValue()) { 8463 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8464 APInt XW = X->sextOrSelf(W); 8465 APInt YW = Y->sextOrSelf(W); 8466 return XW.slt(YW) ? *X : *Y; 8467 } 8468 if (!X.hasValue() && !Y.hasValue()) 8469 return None; 8470 return X.hasValue() ? *X : *Y; 8471 } 8472 8473 /// Helper function to truncate an optional APInt to a given BitWidth. 8474 /// When solving addrec-related equations, it is preferable to return a value 8475 /// that has the same bit width as the original addrec's coefficients. If the 8476 /// solution fits in the original bit width, truncate it (except for i1). 8477 /// Returning a value of a different bit width may inhibit some optimizations. 8478 /// 8479 /// In general, a solution to a quadratic equation generated from an addrec 8480 /// may require BW+1 bits, where BW is the bit width of the addrec's 8481 /// coefficients. The reason is that the coefficients of the quadratic 8482 /// equation are BW+1 bits wide (to avoid truncation when converting from 8483 /// the addrec to the equation). 8484 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8485 if (!X.hasValue()) 8486 return None; 8487 unsigned W = X->getBitWidth(); 8488 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8489 return X->trunc(BitWidth); 8490 return X; 8491 } 8492 8493 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8494 /// iterations. The values L, M, N are assumed to be signed, and they 8495 /// should all have the same bit widths. 8496 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8497 /// where BW is the bit width of the addrec's coefficients. 8498 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8499 /// returned as such, otherwise the bit width of the returned value may 8500 /// be greater than BW. 8501 /// 8502 /// This function returns None if 8503 /// (a) the addrec coefficients are not constant, or 8504 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8505 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8506 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 
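/// Worked example (illustrative): the chrec {-3,+,3,+,2} takes the values
/// -3, 0, 5, 12, ... Its accumulated value is c(n) = -3 + 3n + n(n-1), so
/// GetQuadraticEquation produces 2n^2 + 4n - 6 = 0 (coefficients doubled to
/// keep them integral), whose least non-negative root n = 1 matches c(1) = 0.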
8507 static Optional<APInt> 8508 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8509 APInt A, B, C, M; 8510 unsigned BitWidth; 8511 auto T = GetQuadraticEquation(AddRec); 8512 if (!T.hasValue()) 8513 return None; 8514 8515 std::tie(A, B, C, M, BitWidth) = *T; 8516 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8517 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8518 if (!X.hasValue()) 8519 return None; 8520 8521 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8522 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8523 if (!V->isZero()) 8524 return None; 8525 8526 return TruncIfPossible(X, BitWidth); 8527 } 8528 8529 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8530 /// iterations. The values M, N are assumed to be signed, and they 8531 /// should all have the same bit widths. 8532 /// Find the least n such that c(n) does not belong to the given range, 8533 /// while c(n-1) does. 8534 /// 8535 /// This function returns None if 8536 /// (a) the addrec coefficients are not constant, or 8537 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8538 /// bounds of the range. 8539 static Optional<APInt> 8540 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8541 const ConstantRange &Range, ScalarEvolution &SE) { 8542 assert(AddRec->getOperand(0)->isZero() && 8543 "Starting value of addrec should be 0"); 8544 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8545 << Range << ", addrec " << *AddRec << '\n'); 8546 // This case is handled in getNumIterationsInRange. Here we can assume that 8547 // we start in the range. 8548 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8549 "Addrec's initial value should be in range"); 8550 8551 APInt A, B, C, M; 8552 unsigned BitWidth; 8553 auto T = GetQuadraticEquation(AddRec); 8554 if (!T.hasValue()) 8555 return None; 8556 8557 // Be careful about the return value: there can be two reasons for not 8558 // returning an actual number. First, if no solutions to the equations 8559 // were found, and second, if the solutions don't leave the given range. 8560 // The first case means that the actual solution is "unknown", the second 8561 // means that it's known, but not valid. If the solution is unknown, we 8562 // cannot make any conclusions. 8563 // Return a pair: the optional solution and a flag indicating if the 8564 // solution was found. 8565 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8566 // Solve for signed overflow and unsigned overflow, pick the lower 8567 // solution. 8568 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8569 << Bound << " (before multiplying by " << M << ")\n"); 8570 Bound *= M; // The quadratic equation multiplier. 
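    // (A, B and C were scaled by the same multiplier M when the equation was
    // formed, so the boundary has to be scaled as well before it is moved
    // into the constant term of the equation below.)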
8571
8572     Optional<APInt> SO = None;
8573     if (BitWidth > 1) {
8574       LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8575                            "signed overflow\n");
8576       SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
8577     }
8578     LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
8579                          "unsigned overflow\n");
8580     Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
8581                                                               BitWidth+1);
8582
8583     auto LeavesRange = [&] (const APInt &X) {
8584       ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
8585       ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
8586       if (Range.contains(V0->getValue()))
8587         return false;
8588       // X should be at least 1, so X-1 is non-negative.
8589       ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
8590       ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
8591       if (Range.contains(V1->getValue()))
8592         return true;
8593       return false;
8594     };
8595
8596     // If SolveQuadraticEquationWrap returns None, it means that there can
8597     // be a solution, but the function failed to find it. We cannot treat it
8598     // as "no solution".
8599     if (!SO.hasValue() || !UO.hasValue())
8600       return { None, false };
8601
8602     // Check the smaller value first to see if it leaves the range.
8603     // At this point, both SO and UO must have values.
8604     Optional<APInt> Min = MinOptional(SO, UO);
8605     if (LeavesRange(*Min))
8606       return { Min, true };
8607     Optional<APInt> Max = Min == SO ? UO : SO;
8608     if (LeavesRange(*Max))
8609       return { Max, true };
8610
8611     // Solutions were found, but were eliminated, hence the "true".
8612     return { None, true };
8613   };
8614
8615   std::tie(A, B, C, M, BitWidth) = *T;
8616   // Lower bound is inclusive; subtract 1 to represent the exiting value.
8617   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
8618   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
8619   auto SL = SolveForBoundary(Lower);
8620   auto SU = SolveForBoundary(Upper);
8621   // If any of the solutions was unknown, no meaningful conclusions can
8622   // be made.
8623   if (!SL.second || !SU.second)
8624     return None;
8625
8626   // Claim: The correct solution is not some value between Min and Max.
8627   //
8628   // Justification: Assuming that Min and Max are different values, one of
8629   // them is when the first signed overflow happens, the other is when the
8630   // first unsigned overflow happens. Crossing the range boundary is only
8631   // possible via an overflow (treating 0 as a special case of it, modeling
8632   // an overflow as crossing k*2^W for some k).
8633   //
8634   // The interesting case here is when Min was eliminated as an invalid
8635   // solution, but Max was not. The argument is that if there was another
8636   // overflow between Min and Max, it would also have been eliminated if
8637   // it was considered.
8638   //
8639   // For a given boundary, it is possible to have two overflows of the same
8640   // type (signed/unsigned) without having the other type in between: this
8641   // can happen when the vertex of the parabola is between the iterations
8642   // corresponding to the overflows. This is only possible when the two
8643   // overflows cross k*2^W for the same k. In such a case, if the second one
8644   // left the range (and was the first one to do so), the first overflow
8645   // would have to enter the range, which would mean that either we had left
8646   // the range before or that we started outside of it. Both of these cases
8647   // are contradictions.
8648   //
8649   // Claim: In the case where SolveForBoundary returns None, the correct
8650   // solution is not some value between the Max for this boundary and the
8651   // Min of the other boundary.
8652   //
8653   // Justification: Assume that we had such Max_A and Min_B corresponding
8654   // to range boundaries A and B and such that Max_A < Min_B. If there was
8655   // a solution between Max_A and Min_B, it would have to be caused by an
8656   // overflow corresponding to either A or B. It cannot correspond to B,
8657   // since Min_B is the first occurrence of such an overflow. If it
8658   // corresponded to A, it would have to be either a signed or an unsigned
8659   // overflow that is larger than both eliminated overflows for A. But
8660   // between the eliminated overflows and this overflow, the values would
8661   // cover the entire value space, thus crossing the other boundary, which
8662   // is a contradiction.
8663
8664   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
8665 }
8666
8667 ScalarEvolution::ExitLimit
8668 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
8669                               bool AllowPredicates) {
8670
8671   // This is only used for loops with a "x != y" exit test. The exit condition
8672   // is now expressed as a single expression, V = x-y. So the exit test is
8673   // effectively V != 0. We know, and take advantage of, the fact that this
8674   // expression is only used in a comparison-with-zero context.
8675
8676   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
8677   // If the value is a constant:
8678   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
8679     // If the value is already zero, the branch will execute zero times.
8680     if (C->getValue()->isZero()) return C;
8681     return getCouldNotCompute(); // Otherwise it will loop infinitely.
8682   }
8683
8684   const SCEVAddRecExpr *AddRec =
8685       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
8686
8687   if (!AddRec && AllowPredicates)
8688     // Try to make this an AddRec using runtime tests, in the first X
8689     // iterations of this loop, where X is the SCEV expression found by the
8690     // algorithm below.
8691     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
8692
8693   if (!AddRec || AddRec->getLoop() != L)
8694     return getCouldNotCompute();
8695
8696   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
8697   // the quadratic equation to solve it.
8698   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
8699     // We can only use this value if the chrec ends up with an exact zero
8700     // value at this index. When solving for "X*X != 5", for example, we
8701     // should not accept a root of 2.
8702     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
8703       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
8704       return ExitLimit(R, R, false, Predicates);
8705     }
8706     return getCouldNotCompute();
8707   }
8708
8709   // Otherwise we can only handle this if it is affine.
8710   if (!AddRec->isAffine())
8711     return getCouldNotCompute();
8712
8713   // If this is an affine expression, the execution count of this branch is
8714   // the minimum unsigned root of the following equation:
8715   //
8716   //   Start + Step*N = 0 (mod 2^BW)
8717   //
8718   // equivalent to:
8719   //
8720   //   Step*N = -Start (mod 2^BW)
8721   //
8722   // where BW is the common bit width of Start and Step.
8723
8724   // Get the initial value for the loop.
8725 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8726 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8727 8728 // For now we handle only constant steps. 8729 // 8730 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8731 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8732 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8733 // We have not yet seen any such cases. 8734 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8735 if (!StepC || StepC->getValue()->isZero()) 8736 return getCouldNotCompute(); 8737 8738 // For positive steps (counting up until unsigned overflow): 8739 // N = -Start/Step (as unsigned) 8740 // For negative steps (counting down to zero): 8741 // N = Start/-Step 8742 // First compute the unsigned distance from zero in the direction of Step. 8743 bool CountDown = StepC->getAPInt().isNegative(); 8744 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8745 8746 // Handle unitary steps, which cannot wraparound. 8747 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8748 // N = Distance (as unsigned) 8749 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8750 APInt MaxBECount = getUnsignedRangeMax(Distance); 8751 8752 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8753 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8754 // case, and see if we can improve the bound. 8755 // 8756 // Explicitly handling this here is necessary because getUnsignedRange 8757 // isn't context-sensitive; it doesn't know that we only care about the 8758 // range inside the loop. 8759 const SCEV *Zero = getZero(Distance->getType()); 8760 const SCEV *One = getOne(Distance->getType()); 8761 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8762 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8763 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8764 // as "unsigned_max(Distance + 1) - 1". 8765 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8766 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8767 } 8768 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8769 } 8770 8771 // If the condition controls loop exit (the loop exits only if the expression 8772 // is true) and the addition is no-wrap we can use unsigned divide to 8773 // compute the backedge count. In this case, the step may not divide the 8774 // distance, but we don't care because if the condition is "missed" the loop 8775 // will have undefined behavior due to wrapping. 8776 if (ControlsExit && AddRec->hasNoSelfWrap() && 8777 loopHasNoAbnormalExits(AddRec->getLoop())) { 8778 const SCEV *Exact = 8779 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8780 const SCEV *Max = 8781 Exact == getCouldNotCompute() 8782 ? Exact 8783 : getConstant(getUnsignedRangeMax(Exact)); 8784 return ExitLimit(Exact, Max, false, Predicates); 8785 } 8786 8787 // Solve the general equation. 8788 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8789 getNegativeSCEV(Start), *this); 8790 const SCEV *M = E == getCouldNotCompute() 8791 ? E 8792 : getConstant(getUnsignedRangeMax(E)); 8793 return ExitLimit(E, M, false, Predicates); 8794 } 8795 8796 ScalarEvolution::ExitLimit 8797 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8798 // Loops that look like: while (X == 0) are very strange indeed. 
We don't 8799 // handle them yet except for the trivial case. This could be expanded in the 8800 // future as needed. 8801 8802 // If the value is a constant, check to see if it is known to be non-zero 8803 // already. If so, the backedge will execute zero times. 8804 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8805 if (!C->getValue()->isZero()) 8806 return getZero(C->getType()); 8807 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8808 } 8809 8810 // We could implement others, but I really doubt anyone writes loops like 8811 // this, and if they did, they would already be constant folded. 8812 return getCouldNotCompute(); 8813 } 8814 8815 std::pair<BasicBlock *, BasicBlock *> 8816 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8817 // If the block has a unique predecessor, then there is no path from the 8818 // predecessor to the block that does not go through the direct edge 8819 // from the predecessor to the block. 8820 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8821 return {Pred, BB}; 8822 8823 // A loop's header is defined to be a block that dominates the loop. 8824 // If the header has a unique predecessor outside the loop, it must be 8825 // a block that has exactly one successor that can reach the loop. 8826 if (Loop *L = LI.getLoopFor(BB)) 8827 return {L->getLoopPredecessor(), L->getHeader()}; 8828 8829 return {nullptr, nullptr}; 8830 } 8831 8832 /// SCEV structural equivalence is usually sufficient for testing whether two 8833 /// expressions are equal, however for the purposes of looking for a condition 8834 /// guarding a loop, it can be useful to be a little more general, since a 8835 /// front-end may have replicated the controlling expression. 8836 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8837 // Quick check to see if they are the same SCEV. 8838 if (A == B) return true; 8839 8840 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8841 // Not all instructions that are "identical" compute the same value. For 8842 // instance, two distinct alloca instructions allocating the same type are 8843 // identical and do not read memory; but compute distinct values. 8844 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8845 }; 8846 8847 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8848 // two different instructions with the same value. Check for this case. 8849 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8850 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8851 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8852 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8853 if (ComputesEqualValues(AI, BI)) 8854 return true; 8855 8856 // Otherwise assume they may have a different value. 8857 return false; 8858 } 8859 8860 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8861 const SCEV *&LHS, const SCEV *&RHS, 8862 unsigned Depth) { 8863 bool Changed = false; 8864 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 8865 // '0 != 0'. 8866 auto TrivialCase = [&](bool TriviallyTrue) { 8867 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8868 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 8869 return true; 8870 }; 8871 // If we hit the max recursion limit bail out. 8872 if (Depth >= 3) 8873 return false; 8874 8875 // Canonicalize a constant to the right side. 
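  // For example (illustrative): "icmp ult 5, %n" becomes "icmp ugt %n, 5".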
8876   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
8877     // Check for both operands constant.
8878     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
8879       if (ConstantExpr::getICmp(Pred,
8880                                 LHSC->getValue(),
8881                                 RHSC->getValue())->isNullValue())
8882         return TrivialCase(false);
8883       else
8884         return TrivialCase(true);
8885     }
8886     // Otherwise swap the operands to put the constant on the right.
8887     std::swap(LHS, RHS);
8888     Pred = ICmpInst::getSwappedPredicate(Pred);
8889     Changed = true;
8890   }
8891
8892   // If we're comparing an addrec with a value which is loop-invariant in the
8893   // addrec's loop, put the addrec on the left. Also make a dominance check,
8894   // as both operands could be addrecs loop-invariant in each other's loop.
8895   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
8896     const Loop *L = AR->getLoop();
8897     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
8898       std::swap(LHS, RHS);
8899       Pred = ICmpInst::getSwappedPredicate(Pred);
8900       Changed = true;
8901     }
8902   }
8903
8904   // If there's a constant operand, canonicalize comparisons with boundary
8905   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
8906   if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
8907     const APInt &RA = RC->getAPInt();
8908
8909     bool SimplifiedByConstantRange = false;
8910
8911     if (!ICmpInst::isEquality(Pred)) {
8912       ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
8913       if (ExactCR.isFullSet())
8914         return TrivialCase(true);
8915       else if (ExactCR.isEmptySet())
8916         return TrivialCase(false);
8917
8918       APInt NewRHS;
8919       CmpInst::Predicate NewPred;
8920       if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
8921           ICmpInst::isEquality(NewPred)) {
8922         // We were able to convert an inequality to an equality.
8923         Pred = NewPred;
8924         RHS = getConstant(NewRHS);
8925         Changed = SimplifiedByConstantRange = true;
8926       }
8927     }
8928
8929     if (!SimplifiedByConstantRange) {
8930       switch (Pred) {
8931       default:
8932         break;
8933       case ICmpInst::ICMP_EQ:
8934       case ICmpInst::ICMP_NE:
8935         // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
8936         if (!RA)
8937           if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
8938             if (const SCEVMulExpr *ME =
8939                     dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
8940               if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
8941                   ME->getOperand(0)->isAllOnesValue()) {
8942                 RHS = AE->getOperand(1);
8943                 LHS = ME->getOperand(1);
8944                 Changed = true;
8945               }
8946         break;
8947
8948
8949       // The "Should have been caught earlier!" messages refer to the fact
8950       // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
8951       // should have fired on the corresponding cases, and canonicalized the
8952       // check to a trivial case.
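      //
      // Worked example (illustrative): for `x u>= 0`,
      // makeExactICmpRegion(UGE, 0) is the full set, so TrivialCase(true)
      // already returned above. That is why the ICMP_UGE case below may
      // assert !RA.isMinValue() and rewrite `x u>= RA` into `x u> RA - 1`
      // without RA - 1 wrapping.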
8953 8954 case ICmpInst::ICMP_UGE: 8955 assert(!RA.isMinValue() && "Should have been caught earlier!"); 8956 Pred = ICmpInst::ICMP_UGT; 8957 RHS = getConstant(RA - 1); 8958 Changed = true; 8959 break; 8960 case ICmpInst::ICMP_ULE: 8961 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 8962 Pred = ICmpInst::ICMP_ULT; 8963 RHS = getConstant(RA + 1); 8964 Changed = true; 8965 break; 8966 case ICmpInst::ICMP_SGE: 8967 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 8968 Pred = ICmpInst::ICMP_SGT; 8969 RHS = getConstant(RA - 1); 8970 Changed = true; 8971 break; 8972 case ICmpInst::ICMP_SLE: 8973 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 8974 Pred = ICmpInst::ICMP_SLT; 8975 RHS = getConstant(RA + 1); 8976 Changed = true; 8977 break; 8978 } 8979 } 8980 } 8981 8982 // Check for obvious equality. 8983 if (HasSameValue(LHS, RHS)) { 8984 if (ICmpInst::isTrueWhenEqual(Pred)) 8985 return TrivialCase(true); 8986 if (ICmpInst::isFalseWhenEqual(Pred)) 8987 return TrivialCase(false); 8988 } 8989 8990 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 8991 // adding or subtracting 1 from one of the operands. 8992 switch (Pred) { 8993 case ICmpInst::ICMP_SLE: 8994 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 8995 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 8996 SCEV::FlagNSW); 8997 Pred = ICmpInst::ICMP_SLT; 8998 Changed = true; 8999 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9000 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9001 SCEV::FlagNSW); 9002 Pred = ICmpInst::ICMP_SLT; 9003 Changed = true; 9004 } 9005 break; 9006 case ICmpInst::ICMP_SGE: 9007 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9008 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9009 SCEV::FlagNSW); 9010 Pred = ICmpInst::ICMP_SGT; 9011 Changed = true; 9012 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9013 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9014 SCEV::FlagNSW); 9015 Pred = ICmpInst::ICMP_SGT; 9016 Changed = true; 9017 } 9018 break; 9019 case ICmpInst::ICMP_ULE: 9020 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9021 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9022 SCEV::FlagNUW); 9023 Pred = ICmpInst::ICMP_ULT; 9024 Changed = true; 9025 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9026 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9027 Pred = ICmpInst::ICMP_ULT; 9028 Changed = true; 9029 } 9030 break; 9031 case ICmpInst::ICMP_UGE: 9032 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9033 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9034 Pred = ICmpInst::ICMP_UGT; 9035 Changed = true; 9036 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9037 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9038 SCEV::FlagNUW); 9039 Pred = ICmpInst::ICMP_UGT; 9040 Changed = true; 9041 } 9042 break; 9043 default: 9044 break; 9045 } 9046 9047 // TODO: More simplifications are possible here. 9048 9049 // Recursively simplify until we either hit a recursion limit or nothing 9050 // changes. 
9051 if (Changed) 9052 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9053 9054 return Changed; 9055 } 9056 9057 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9058 return getSignedRangeMax(S).isNegative(); 9059 } 9060 9061 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9062 return getSignedRangeMin(S).isStrictlyPositive(); 9063 } 9064 9065 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9066 return !getSignedRangeMin(S).isNegative(); 9067 } 9068 9069 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9070 return !getSignedRangeMax(S).isStrictlyPositive(); 9071 } 9072 9073 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9074 return isKnownNegative(S) || isKnownPositive(S); 9075 } 9076 9077 std::pair<const SCEV *, const SCEV *> 9078 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9079 // Compute SCEV on entry of loop L. 9080 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9081 if (Start == getCouldNotCompute()) 9082 return { Start, Start }; 9083 // Compute post increment SCEV for loop L. 9084 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9085 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9086 return { Start, PostInc }; 9087 } 9088 9089 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9090 const SCEV *LHS, const SCEV *RHS) { 9091 // First collect all loops. 9092 SmallPtrSet<const Loop *, 8> LoopsUsed; 9093 getUsedLoops(LHS, LoopsUsed); 9094 getUsedLoops(RHS, LoopsUsed); 9095 9096 if (LoopsUsed.empty()) 9097 return false; 9098 9099 // Domination relationship must be a linear order on collected loops. 9100 #ifndef NDEBUG 9101 for (auto *L1 : LoopsUsed) 9102 for (auto *L2 : LoopsUsed) 9103 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9104 DT.dominates(L2->getHeader(), L1->getHeader())) && 9105 "Domination relationship is not a linear order"); 9106 #endif 9107 9108 const Loop *MDL = 9109 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9110 [&](const Loop *L1, const Loop *L2) { 9111 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9112 }); 9113 9114 // Get init and post increment value for LHS. 9115 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9116 // if LHS contains unknown non-invariant SCEV then bail out. 9117 if (SplitLHS.first == getCouldNotCompute()) 9118 return false; 9119 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9120 // Get init and post increment value for RHS. 9121 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9122 // if RHS contains unknown non-invariant SCEV then bail out. 9123 if (SplitRHS.first == getCouldNotCompute()) 9124 return false; 9125 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9126 // It is possible that init SCEV contains an invariant load but it does 9127 // not dominate MDL and is not available at MDL loop entry, so we should 9128 // check it here. 9129 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9130 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9131 return false; 9132 9133 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) && 9134 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9135 SplitRHS.second); 9136 } 9137 9138 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9139 const SCEV *LHS, const SCEV *RHS) { 9140 // Canonicalize the inputs first. 
9141 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9142 9143 if (isKnownViaInduction(Pred, LHS, RHS)) 9144 return true; 9145 9146 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9147 return true; 9148 9149 // Otherwise see what can be done with some simple reasoning. 9150 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9151 } 9152 9153 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9154 const SCEVAddRecExpr *LHS, 9155 const SCEV *RHS) { 9156 const Loop *L = LHS->getLoop(); 9157 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9158 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9159 } 9160 9161 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9162 ICmpInst::Predicate Pred, 9163 bool &Increasing) { 9164 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9165 9166 #ifndef NDEBUG 9167 // Verify an invariant: inverting the predicate should turn a monotonically 9168 // increasing change to a monotonically decreasing one, and vice versa. 9169 bool IncreasingSwapped; 9170 bool ResultSwapped = isMonotonicPredicateImpl( 9171 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9172 9173 assert(Result == ResultSwapped && "should be able to analyze both!"); 9174 if (ResultSwapped) 9175 assert(Increasing == !IncreasingSwapped && 9176 "monotonicity should flip as we flip the predicate"); 9177 #endif 9178 9179 return Result; 9180 } 9181 9182 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9183 ICmpInst::Predicate Pred, 9184 bool &Increasing) { 9185 9186 // A zero step value for LHS means the induction variable is essentially a 9187 // loop invariant value. We don't really depend on the predicate actually 9188 // flipping from false to true (for increasing predicates, and the other way 9189 // around for decreasing predicates), all we care about is that *if* the 9190 // predicate changes then it only changes from false to true. 9191 // 9192 // A zero step value in itself is not very useful, but there may be places 9193 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9194 // as general as possible. 9195 9196 switch (Pred) { 9197 default: 9198 return false; // Conservative answer 9199 9200 case ICmpInst::ICMP_UGT: 9201 case ICmpInst::ICMP_UGE: 9202 case ICmpInst::ICMP_ULT: 9203 case ICmpInst::ICMP_ULE: 9204 if (!LHS->hasNoUnsignedWrap()) 9205 return false; 9206 9207 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9208 return true; 9209 9210 case ICmpInst::ICMP_SGT: 9211 case ICmpInst::ICMP_SGE: 9212 case ICmpInst::ICMP_SLT: 9213 case ICmpInst::ICMP_SLE: { 9214 if (!LHS->hasNoSignedWrap()) 9215 return false; 9216 9217 const SCEV *Step = LHS->getStepRecurrence(*this); 9218 9219 if (isKnownNonNegative(Step)) { 9220 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9221 return true; 9222 } 9223 9224 if (isKnownNonPositive(Step)) { 9225 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9226 return true; 9227 } 9228 9229 return false; 9230 } 9231 9232 } 9233 9234 llvm_unreachable("switch has default clause!"); 9235 } 9236 9237 bool ScalarEvolution::isLoopInvariantPredicate( 9238 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9239 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9240 const SCEV *&InvariantRHS) { 9241 9242 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 
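  // E.g. (illustrative): for a query `%n s> {0,+,1}<nsw>` with %n
  // loop-invariant, we swap to `{0,+,1}<nsw> s< %n`, so the addrec always
  // ends up on the LHS below.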
9243 if (!isLoopInvariant(RHS, L)) { 9244 if (!isLoopInvariant(LHS, L)) 9245 return false; 9246 9247 std::swap(LHS, RHS); 9248 Pred = ICmpInst::getSwappedPredicate(Pred); 9249 } 9250 9251 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9252 if (!ArLHS || ArLHS->getLoop() != L) 9253 return false; 9254 9255 bool Increasing; 9256 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9257 return false; 9258 9259 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9260 // true as the loop iterates, and the backedge is control dependent on 9261 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9262 // 9263 // * if the predicate was false in the first iteration then the predicate 9264 // is never evaluated again, since the loop exits without taking the 9265 // backedge. 9266 // * if the predicate was true in the first iteration then it will 9267 // continue to be true for all future iterations since it is 9268 // monotonically increasing. 9269 // 9270 // For both the above possibilities, we can replace the loop varying 9271 // predicate with its value on the first iteration of the loop (which is 9272 // loop invariant). 9273 // 9274 // A similar reasoning applies for a monotonically decreasing predicate, by 9275 // replacing true with false and false with true in the above two bullets. 9276 9277 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9278 9279 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9280 return false; 9281 9282 InvariantPred = Pred; 9283 InvariantLHS = ArLHS->getStart(); 9284 InvariantRHS = RHS; 9285 return true; 9286 } 9287 9288 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9289 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9290 if (HasSameValue(LHS, RHS)) 9291 return ICmpInst::isTrueWhenEqual(Pred); 9292 9293 // This code is split out from isKnownPredicate because it is called from 9294 // within isLoopEntryGuardedByCond. 9295 9296 auto CheckRanges = 9297 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9298 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9299 .contains(RangeLHS); 9300 }; 9301 9302 // The check at the top of the function catches the case where the values are 9303 // known to be equal. 9304 if (Pred == CmpInst::ICMP_EQ) 9305 return false; 9306 9307 if (Pred == CmpInst::ICMP_NE) 9308 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9309 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9310 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9311 9312 if (CmpInst::isSigned(Pred)) 9313 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9314 9315 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9316 } 9317 9318 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9319 const SCEV *LHS, 9320 const SCEV *RHS) { 9321 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9322 // Return Y via OutY. 
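  // E.g. (illustrative): with Result = (%x + 42)<nsw> and X = %x, the lambda
  // below sets OutY = 42 and succeeds iff the no-wrap flags present on the
  // add cover ExpectedFlags.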
9323   auto MatchBinaryAddToConst =
9324       [this](const SCEV *Result, const SCEV *X, APInt &OutY,
9325              SCEV::NoWrapFlags ExpectedFlags) {
9326     const SCEV *NonConstOp, *ConstOp;
9327     SCEV::NoWrapFlags FlagsPresent;
9328
9329     if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9330         !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9331       return false;
9332
9333     OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9334     return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9335   };
9336
9337   APInt C;
9338
9339   switch (Pred) {
9340   default:
9341     break;
9342
9343   case ICmpInst::ICMP_SGE:
9344     std::swap(LHS, RHS);
9345     LLVM_FALLTHROUGH;
9346   case ICmpInst::ICMP_SLE:
9347     // X s<= (X + C)<nsw> if C >= 0
9348     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
9349       return true;
9350
9351     // (X + C)<nsw> s<= X if C <= 0
9352     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
9353         !C.isStrictlyPositive())
9354       return true;
9355     break;
9356
9357   case ICmpInst::ICMP_SGT:
9358     std::swap(LHS, RHS);
9359     LLVM_FALLTHROUGH;
9360   case ICmpInst::ICMP_SLT:
9361     // X s< (X + C)<nsw> if C > 0
9362     if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
9363         C.isStrictlyPositive())
9364       return true;
9365
9366     // (X + C)<nsw> s< X if C < 0
9367     if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
9368       return true;
9369     break;
9370   }
9371
9372   return false;
9373 }
9374
9375 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
9376                                                    const SCEV *LHS,
9377                                                    const SCEV *RHS) {
9378   if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
9379     return false;
9380
9381   // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
9382   // on the stack can result in exponential time complexity.
9383   SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
9384
9385   // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
9386   //
9387   // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
9388   // isKnownPredicate. isKnownPredicate is more powerful, but also more
9389   // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9390   // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9391   // use isKnownPredicate later if needed.
9392   return isKnownNonNegative(RHS) &&
9393          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9394          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9395 }
9396
9397 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
9398                                         ICmpInst::Predicate Pred,
9399                                         const SCEV *LHS, const SCEV *RHS) {
9400   // No need to even try if we know the module has no guards.
9401   if (!HasGuards)
9402     return false;
9403
9404   return any_of(*BB, [&](Instruction &I) {
9405     using namespace llvm::PatternMatch;
9406
9407     Value *Condition;
9408     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9409                          m_Value(Condition))) &&
9410            isImpliedCond(Pred, LHS, RHS, Condition, false);
9411   });
9412 }
9413
9414 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9415 /// protected by a conditional between LHS and RHS. This is used to
9416 /// eliminate casts.
9417 bool
9418 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9419                                              ICmpInst::Predicate Pred,
9420                                              const SCEV *LHS, const SCEV *RHS) {
9421   // Interpret a null as meaning no loop, where there is obviously no guard
9422   // (interprocedural conditions notwithstanding).
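  //
  // (With no loop there is no backedge either, so the predicate holds
  // vacuously on every backedge; contrast isLoopEntryGuardedByCond further
  // down, which must answer false in the same situation.)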
9423   if (!L) return true;
9424
9425   if (VerifyIR)
9426     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
9427            "This cannot be done on broken IR!");
9428
9429
9430   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9431     return true;
9432
9433   BasicBlock *Latch = L->getLoopLatch();
9434   if (!Latch)
9435     return false;
9436
9437   BranchInst *LoopContinuePredicate =
9438       dyn_cast<BranchInst>(Latch->getTerminator());
9439   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9440       isImpliedCond(Pred, LHS, RHS,
9441                     LoopContinuePredicate->getCondition(),
9442                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9443     return true;
9444
9445   // We don't want more than one activation of the following loops on the stack
9446   // -- that can lead to O(n!) time complexity.
9447   if (WalkingBEDominatingConds)
9448     return false;
9449
9450   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9451
9452   // See if we can exploit a trip count to prove the predicate.
9453   const auto &BETakenInfo = getBackedgeTakenInfo(L);
9454   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9455   if (LatchBECount != getCouldNotCompute()) {
9456     // We know that Latch branches back to the loop header exactly
9457     // LatchBECount times. This means the backedge condition at Latch is
9458     // equivalent to "{0,+,1} u< LatchBECount".
9459     Type *Ty = LatchBECount->getType();
9460     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9461     const SCEV *LoopCounter =
9462         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9463     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9464                       LatchBECount))
9465       return true;
9466   }
9467
9468   // Check conditions due to any @llvm.assume intrinsics.
9469   for (auto &AssumeVH : AC.assumptions()) {
9470     if (!AssumeVH)
9471       continue;
9472     auto *CI = cast<CallInst>(AssumeVH);
9473     if (!DT.dominates(CI, Latch->getTerminator()))
9474       continue;
9475
9476     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
9477       return true;
9478   }
9479
9480   // If the loop is not reachable from the entry block, we risk running into an
9481   // infinite loop as we walk up into the dom tree. These loops do not matter
9482   // anyway, so we just return a conservative answer when we see them.
9483   if (!DT.isReachableFromEntry(L->getHeader()))
9484     return false;
9485
9486   if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9487     return true;
9488
9489   for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
9490        DTN != HeaderDTN; DTN = DTN->getIDom()) {
9491     assert(DTN && "should reach the loop header before reaching the root!");
9492
9493     BasicBlock *BB = DTN->getBlock();
9494     if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9495       return true;
9496
9497     BasicBlock *PBB = BB->getSinglePredecessor();
9498     if (!PBB)
9499       continue;
9500
9501     BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
9502     if (!ContinuePredicate || !ContinuePredicate->isConditional())
9503       continue;
9504
9505     Value *Condition = ContinuePredicate->getCondition();
9506
9507     // If we have an edge `E` within the loop body that dominates the only
9508     // latch, the condition guarding `E` also guards the backedge. This
9509     // reasoning works only for loops with a single latch.
9510
9511     BasicBlockEdge DominatingEdge(PBB, BB);
9512     if (DominatingEdge.isSingleEdge()) {
9513       // We're constructively (and conservatively) enumerating edges within the
9514       // loop body that dominate the latch.
The dominator tree better agree 9515 // with us on this: 9516 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9517 9518 if (isImpliedCond(Pred, LHS, RHS, Condition, 9519 BB != ContinuePredicate->getSuccessor(0))) 9520 return true; 9521 } 9522 } 9523 9524 return false; 9525 } 9526 9527 bool 9528 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9529 ICmpInst::Predicate Pred, 9530 const SCEV *LHS, const SCEV *RHS) { 9531 // Interpret a null as meaning no loop, where there is obviously no guard 9532 // (interprocedural conditions notwithstanding). 9533 if (!L) return false; 9534 9535 if (VerifyIR) 9536 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9537 "This cannot be done on broken IR!"); 9538 9539 // Both LHS and RHS must be available at loop entry. 9540 assert(isAvailableAtLoopEntry(LHS, L) && 9541 "LHS is not available at Loop Entry"); 9542 assert(isAvailableAtLoopEntry(RHS, L) && 9543 "RHS is not available at Loop Entry"); 9544 9545 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9546 return true; 9547 9548 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9549 // the facts (a >= b && a != b) separately. A typical situation is when the 9550 // non-strict comparison is known from ranges and non-equality is known from 9551 // dominating predicates. If we are proving strict comparison, we always try 9552 // to prove non-equality and non-strict comparison separately. 9553 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9554 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9555 bool ProvedNonStrictComparison = false; 9556 bool ProvedNonEquality = false; 9557 9558 if (ProvingStrictComparison) { 9559 ProvedNonStrictComparison = 9560 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9561 ProvedNonEquality = 9562 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9563 if (ProvedNonStrictComparison && ProvedNonEquality) 9564 return true; 9565 } 9566 9567 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9568 auto ProveViaGuard = [&](BasicBlock *Block) { 9569 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9570 return true; 9571 if (ProvingStrictComparison) { 9572 if (!ProvedNonStrictComparison) 9573 ProvedNonStrictComparison = 9574 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9575 if (!ProvedNonEquality) 9576 ProvedNonEquality = 9577 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9578 if (ProvedNonStrictComparison && ProvedNonEquality) 9579 return true; 9580 } 9581 return false; 9582 }; 9583 9584 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9585 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9586 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9587 return true; 9588 if (ProvingStrictComparison) { 9589 if (!ProvedNonStrictComparison) 9590 ProvedNonStrictComparison = 9591 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9592 if (!ProvedNonEquality) 9593 ProvedNonEquality = 9594 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9595 if (ProvedNonStrictComparison && ProvedNonEquality) 9596 return true; 9597 } 9598 return false; 9599 }; 9600 9601 // Starting at the loop predecessor, climb up the predecessor chain, as long 9602 // as there are predecessors that can be found that have unique successors 9603 // leading to the original header. 
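  // For example (hypothetical block names):
  //
  //   entry -> guard1 -> guard2 -> preheader -> header
  //
  // Each step of the walk yields a (predecessor, unique-successor) pair, so
  // the conditional branches in guard1 and guard2 must be taken toward the
  // loop on every path that enters it, and their conditions can serve as
  // loop guards.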
9604   for (std::pair<BasicBlock *, BasicBlock *>
9605          Pair(L->getLoopPredecessor(), L->getHeader());
9606        Pair.first;
9607        Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
9608
9609     if (ProveViaGuard(Pair.first))
9610       return true;
9611
9612     BranchInst *LoopEntryPredicate =
9613         dyn_cast<BranchInst>(Pair.first->getTerminator());
9614     if (!LoopEntryPredicate ||
9615         LoopEntryPredicate->isUnconditional())
9616       continue;
9617
9618     if (ProveViaCond(LoopEntryPredicate->getCondition(),
9619                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
9620       return true;
9621   }
9622
9623   // Check conditions due to any @llvm.assume intrinsics.
9624   for (auto &AssumeVH : AC.assumptions()) {
9625     if (!AssumeVH)
9626       continue;
9627     auto *CI = cast<CallInst>(AssumeVH);
9628     if (!DT.dominates(CI, L->getHeader()))
9629       continue;
9630
9631     if (ProveViaCond(CI->getArgOperand(0), false))
9632       return true;
9633   }
9634
9635   return false;
9636 }
9637
9638 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
9639                                     const SCEV *LHS, const SCEV *RHS,
9640                                     Value *FoundCondValue,
9641                                     bool Inverse) {
9642   if (!PendingLoopPredicates.insert(FoundCondValue).second)
9643     return false;
9644
9645   auto ClearOnExit =
9646       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
9647
9648   // Recursively handle And and Or conditions.
9649   if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
9650     if (BO->getOpcode() == Instruction::And) {
9651       if (!Inverse)
9652         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9653                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9654     } else if (BO->getOpcode() == Instruction::Or) {
9655       if (Inverse)
9656         return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9657                isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9658     }
9659   }
9660
9661   ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
9662   if (!ICI) return false;
9663
9664   // Now that we've found a conditional branch that dominates the loop or
9665   // controls the loop latch, check to see if it is the comparison we are looking for.
9666   ICmpInst::Predicate FoundPred;
9667   if (Inverse)
9668     FoundPred = ICI->getInversePredicate();
9669   else
9670     FoundPred = ICI->getPredicate();
9671
9672   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
9673   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
9674
9675   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
9676 }
9677
9678 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
9679                                     const SCEV *RHS,
9680                                     ICmpInst::Predicate FoundPred,
9681                                     const SCEV *FoundLHS,
9682                                     const SCEV *FoundRHS) {
9683   // Balance the types.
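  //
  // E.g. (illustrative): proving an i32 fact from an i64 guard extends the
  // narrower (LHS, RHS) pair to i64, choosing sign- or zero-extension to
  // match the signedness of its predicate, so both comparisons below are
  // over the same bit width.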
9684 if (getTypeSizeInBits(LHS->getType()) < 9685 getTypeSizeInBits(FoundLHS->getType())) { 9686 if (CmpInst::isSigned(Pred)) { 9687 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9688 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9689 } else { 9690 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9691 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9692 } 9693 } else if (getTypeSizeInBits(LHS->getType()) > 9694 getTypeSizeInBits(FoundLHS->getType())) { 9695 if (CmpInst::isSigned(FoundPred)) { 9696 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9697 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9698 } else { 9699 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9700 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9701 } 9702 } 9703 9704 // Canonicalize the query to match the way instcombine will have 9705 // canonicalized the comparison. 9706 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9707 if (LHS == RHS) 9708 return CmpInst::isTrueWhenEqual(Pred); 9709 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9710 if (FoundLHS == FoundRHS) 9711 return CmpInst::isFalseWhenEqual(FoundPred); 9712 9713 // Check to see if we can make the LHS or RHS match. 9714 if (LHS == FoundRHS || RHS == FoundLHS) { 9715 if (isa<SCEVConstant>(RHS)) { 9716 std::swap(FoundLHS, FoundRHS); 9717 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9718 } else { 9719 std::swap(LHS, RHS); 9720 Pred = ICmpInst::getSwappedPredicate(Pred); 9721 } 9722 } 9723 9724 // Check whether the found predicate is the same as the desired predicate. 9725 if (FoundPred == Pred) 9726 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9727 9728 // Check whether swapping the found predicate makes it the same as the 9729 // desired predicate. 9730 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9731 if (isa<SCEVConstant>(RHS)) 9732 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 9733 else 9734 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 9735 RHS, LHS, FoundLHS, FoundRHS); 9736 } 9737 9738 // Unsigned comparison is the same as signed comparison when both the operands 9739 // are non-negative. 9740 if (CmpInst::isUnsigned(FoundPred) && 9741 CmpInst::getSignedPredicate(FoundPred) == Pred && 9742 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9743 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9744 9745 // Check if we can make progress by sharpening ranges. 9746 if (FoundPred == ICmpInst::ICMP_NE && 9747 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9748 9749 const SCEVConstant *C = nullptr; 9750 const SCEV *V = nullptr; 9751 9752 if (isa<SCEVConstant>(FoundLHS)) { 9753 C = cast<SCEVConstant>(FoundLHS); 9754 V = FoundRHS; 9755 } else { 9756 C = cast<SCEVConstant>(FoundRHS); 9757 V = FoundLHS; 9758 } 9759 9760 // The guarding predicate tells us that C != V. If the known range 9761 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9762 // range we consider has to correspond to same signedness as the 9763 // predicate we're interested in folding. 9764 9765 APInt Min = ICmpInst::isSigned(Pred) ? 9766 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9767 9768 if (Min == C->getAPInt()) { 9769 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9770 // This is true even if (Min + 1) wraps around -- in case of 9771 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 
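    // Worked example (illustrative): if the relevant range minimum of V is 5
    // and the guard gives V != 5, then V >= 6, i.e. SharperMin == 6 below.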
9772 9773 APInt SharperMin = Min + 1; 9774 9775 switch (Pred) { 9776 case ICmpInst::ICMP_SGE: 9777 case ICmpInst::ICMP_UGE: 9778 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9779 // RHS, we're done. 9780 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9781 getConstant(SharperMin))) 9782 return true; 9783 LLVM_FALLTHROUGH; 9784 9785 case ICmpInst::ICMP_SGT: 9786 case ICmpInst::ICMP_UGT: 9787 // We know from the range information that (V `Pred` Min || 9788 // V == Min). We know from the guarding condition that !(V 9789 // == Min). This gives us 9790 // 9791 // V `Pred` Min || V == Min && !(V == Min) 9792 // => V `Pred` Min 9793 // 9794 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9795 9796 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9797 return true; 9798 LLVM_FALLTHROUGH; 9799 9800 default: 9801 // No change 9802 break; 9803 } 9804 } 9805 } 9806 9807 // Check whether the actual condition is beyond sufficient. 9808 if (FoundPred == ICmpInst::ICMP_EQ) 9809 if (ICmpInst::isTrueWhenEqual(Pred)) 9810 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9811 return true; 9812 if (Pred == ICmpInst::ICMP_NE) 9813 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9814 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9815 return true; 9816 9817 // Otherwise assume the worst. 9818 return false; 9819 } 9820 9821 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9822 const SCEV *&L, const SCEV *&R, 9823 SCEV::NoWrapFlags &Flags) { 9824 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9825 if (!AE || AE->getNumOperands() != 2) 9826 return false; 9827 9828 L = AE->getOperand(0); 9829 R = AE->getOperand(1); 9830 Flags = AE->getNoWrapFlags(); 9831 return true; 9832 } 9833 9834 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9835 const SCEV *Less) { 9836 // We avoid subtracting expressions here because this function is usually 9837 // fairly deep in the call stack (i.e. is called many times). 9838 9839 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9840 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9841 const auto *MAR = cast<SCEVAddRecExpr>(More); 9842 9843 if (LAR->getLoop() != MAR->getLoop()) 9844 return None; 9845 9846 // We look at affine expressions only; not for correctness but to keep 9847 // getStepRecurrence cheap. 9848 if (!LAR->isAffine() || !MAR->isAffine()) 9849 return None; 9850 9851 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9852 return None; 9853 9854 Less = LAR->getStart(); 9855 More = MAR->getStart(); 9856 9857 // fall through 9858 } 9859 9860 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9861 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9862 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9863 return M - L; 9864 } 9865 9866 SCEV::NoWrapFlags Flags; 9867 const SCEV *LLess = nullptr, *RLess = nullptr; 9868 const SCEV *LMore = nullptr, *RMore = nullptr; 9869 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9870 // Compare (X + C1) vs X. 9871 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9872 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9873 if (RLess == More) 9874 return -(C1->getAPInt()); 9875 9876 // Compare X vs (X + C2). 9877 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9878 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9879 if (RMore == Less) 9880 return C2->getAPInt(); 9881 9882 // Compare (X + C1) vs (X + C2). 
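  // E.g. (illustrative): More = (%x + 7), Less = (%x + 3) yields 7 - 3 == 4.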
9883 if (C1 && C2 && RLess == RMore) 9884 return C2->getAPInt() - C1->getAPInt(); 9885 9886 return None; 9887 } 9888 9889 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9890 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9891 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9892 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9893 return false; 9894 9895 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9896 if (!AddRecLHS) 9897 return false; 9898 9899 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9900 if (!AddRecFoundLHS) 9901 return false; 9902 9903 // We'd like to let SCEV reason about control dependencies, so we constrain 9904 // both the inequalities to be about add recurrences on the same loop. This 9905 // way we can use isLoopEntryGuardedByCond later. 9906 9907 const Loop *L = AddRecFoundLHS->getLoop(); 9908 if (L != AddRecLHS->getLoop()) 9909 return false; 9910 9911 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9912 // 9913 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9914 // ... (2) 9915 // 9916 // Informal proof for (2), assuming (1) [*]: 9917 // 9918 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9919 // 9920 // Then 9921 // 9922 // FoundLHS s< FoundRHS s< INT_MIN - C 9923 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9924 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9925 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9926 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9927 // <=> FoundLHS + C s< FoundRHS + C 9928 // 9929 // [*]: (1) can be proved by ruling out overflow. 9930 // 9931 // [**]: This can be proved by analyzing all the four possibilities: 9932 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 9933 // (A s>= 0, B s>= 0). 9934 // 9935 // Note: 9936 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 9937 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 9938 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 9939 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 9940 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 9941 // C)". 9942 9943 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 9944 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 9945 if (!LDiff || !RDiff || *LDiff != *RDiff) 9946 return false; 9947 9948 if (LDiff->isMinValue()) 9949 return true; 9950 9951 APInt FoundRHSLimit; 9952 9953 if (Pred == CmpInst::ICMP_ULT) { 9954 FoundRHSLimit = -(*RDiff); 9955 } else { 9956 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 9957 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 9958 } 9959 9960 // Try to prove (1) or (2), as needed. 
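  // E.g. (illustrative, the unsigned case): if LHS = FoundLHS + 2 and
  // RHS = FoundRHS + 2, then FoundRHSLimit = -2, and it suffices that the
  // loop is entered with FoundRHS u< -2, so that neither addition can wrap.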
9961   return isAvailableAtLoopEntry(FoundRHS, L) &&
9962          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
9963                                   getConstant(FoundRHSLimit));
9964 }
9965
9966 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
9967                                         const SCEV *LHS, const SCEV *RHS,
9968                                         const SCEV *FoundLHS,
9969                                         const SCEV *FoundRHS, unsigned Depth) {
9970   const PHINode *LPhi = nullptr, *RPhi = nullptr;
9971
9972   auto ClearOnExit = make_scope_exit([&]() {
9973     if (LPhi) {
9974       bool Erased = PendingMerges.erase(LPhi);
9975       assert(Erased && "Failed to erase LPhi!");
9976       (void)Erased;
9977     }
9978     if (RPhi) {
9979       bool Erased = PendingMerges.erase(RPhi);
9980       assert(Erased && "Failed to erase RPhi!");
9981       (void)Erased;
9982     }
9983   });
9984
9985   // Find the respective Phis and check that they are not already pending.
9986   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
9987     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
9988       if (!PendingMerges.insert(Phi).second)
9989         return false;
9990       LPhi = Phi;
9991     }
9992   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
9993     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
9994       // If we detect a loop of Phi nodes being processed by this method, for
9995       // example:
9996       //
9997       //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
9998       //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
9999       //
10000       // we don't want to deal with a case that complex, so we return the
10001       // conservative answer false.
10002       if (!PendingMerges.insert(Phi).second)
10003         return false;
10004       RPhi = Phi;
10005     }
10006
10007   // If neither LHS nor RHS is a Phi, there is nothing to do here.
10008   if (!LPhi && !RPhi)
10009     return false;
10010
10011   // If there is a SCEVUnknown Phi we are interested in, make it left.
10012   if (!LPhi) {
10013     std::swap(LHS, RHS);
10014     std::swap(FoundLHS, FoundRHS);
10015     std::swap(LPhi, RPhi);
10016     Pred = ICmpInst::getSwappedPredicate(Pred);
10017   }
10018
10019   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
10020   const BasicBlock *LBB = LPhi->getParent();
10021   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10022
10023   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
10024     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
10025            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
10026            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
10027   };
10028
10029   if (RPhi && RPhi->getParent() == LBB) {
10030     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
10031     // If we compare two Phis from the same block, and for each entry block
10032     // the predicate is true for incoming values from this block, then the
10033     // predicate is also true for the Phis.
10034     for (const BasicBlock *IncBB : predecessors(LBB)) {
10035       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10036       const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
10037       if (!ProvedEasily(L, R))
10038         return false;
10039     }
10040   } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
10041     // Case two: RHS is also a Phi from the same basic block, and it is an
10042     // AddRec. It means that there is a loop which has both AddRec and Unknown
10043     // PHIs; for it we can compare incoming values of AddRec from above the loop
10044     // and latch with their respective incoming values of LPhi.
10045     // TODO: Generalize to handle loops with many inputs in a header.
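    //
    // Sketch of the shape handled below (hypothetical IR):
    //
    //   header:
    //     %iv = phi i32 [ %start, %preheader ], [ %iv.next, %latch ] ; RAR
    //     %lp = phi i32 [ %a, %preheader ], [ %b, %latch ]           ; LPhi
    //
    // It suffices to prove `%a Pred Start(RAR)` for the preheader input and
    // `%b Pred PostInc(RAR)` for the latch input.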
10046     if (LPhi->getNumIncomingValues() != 2) return false;
10047
10048     auto *RLoop = RAR->getLoop();
10049     auto *Predecessor = RLoop->getLoopPredecessor();
10050     assert(Predecessor && "Loop with AddRec with no predecessor?");
10051     const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
10052     if (!ProvedEasily(L1, RAR->getStart()))
10053       return false;
10054     auto *Latch = RLoop->getLoopLatch();
10055     assert(Latch && "Loop with AddRec with no latch?");
10056     const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
10057     if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
10058       return false;
10059   } else {
10060     // In all other cases go over the inputs of LHS and compare each of them
10061     // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
10062     // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
10063     // block different from LBB.
10064     for (const BasicBlock *IncBB : predecessors(LBB)) {
10065       // Check that RHS is available in this block.
10066       if (!dominates(RHS, IncBB))
10067         return false;
10068       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10069       if (!ProvedEasily(L, RHS))
10070         return false;
10071     }
10072   }
10073   return true;
10074 }
10075
10076 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
10077                                             const SCEV *LHS, const SCEV *RHS,
10078                                             const SCEV *FoundLHS,
10079                                             const SCEV *FoundRHS) {
10080   if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
10081     return true;
10082
10083   if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
10084     return true;
10085
10086   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
10087                                      FoundLHS, FoundRHS) ||
10088          // ~x < ~y --> x > y
10089          isImpliedCondOperandsHelper(Pred, LHS, RHS,
10090                                      getNotSCEV(FoundRHS),
10091                                      getNotSCEV(FoundLHS));
10092 }
10093
10094 /// If Expr computes ~A, return A, else return nullptr.
10095 static const SCEV *MatchNotExpr(const SCEV *Expr) {
10096   const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
10097   if (!Add || Add->getNumOperands() != 2 ||
10098       !Add->getOperand(0)->isAllOnesValue())
10099     return nullptr;
10100
10101   const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
10102   if (!AddRHS || AddRHS->getNumOperands() != 2 ||
10103       !AddRHS->getOperand(0)->isAllOnesValue())
10104     return nullptr;
10105
10106   return AddRHS->getOperand(1);
10107 }
10108
10109 /// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
10110 template<typename MaxExprType>
10111 static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
10112                               const SCEV *Candidate) {
10113   const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
10114   if (!MaxExpr) return false;
10115
10116   return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
10117 }
10118
10119 /// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
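/// (There is no separate SCEV node for min here; a min is represented as the
/// not of a max of nots, e.g. smin(A, B) == ~smax(~A, ~B), which is what the
/// MatchNotExpr-based matching below undoes.)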
10120 template<typename MaxExprType>
10121 static bool IsMinConsistingOf(ScalarEvolution &SE,
10122                               const SCEV *MaybeMinExpr,
10123                               const SCEV *Candidate) {
10124   const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
10125   if (!MaybeMaxExpr)
10126     return false;
10127
10128   return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
10129 }
10130
10131 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
10132                                            ICmpInst::Predicate Pred,
10133                                            const SCEV *LHS, const SCEV *RHS) {
10134   // If both sides are affine addrecs for the same loop, with equal
10135   // steps, and we know the recurrences don't wrap, then we only
10136   // need to check the predicate on the starting values.
10137
10138   if (!ICmpInst::isRelational(Pred))
10139     return false;
10140
10141   const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
10142   if (!LAR)
10143     return false;
10144   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10145   if (!RAR)
10146     return false;
10147   if (LAR->getLoop() != RAR->getLoop())
10148     return false;
10149   if (!LAR->isAffine() || !RAR->isAffine())
10150     return false;
10151
10152   if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
10153     return false;
10154
10155   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
10156                          SCEV::FlagNSW : SCEV::FlagNUW;
10157   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
10158     return false;
10159
10160   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
10161 }
10162
10163 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
10164 /// expression?
10165 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
10166                                         ICmpInst::Predicate Pred,
10167                                         const SCEV *LHS, const SCEV *RHS) {
10168   switch (Pred) {
10169   default:
10170     return false;
10171
10172   case ICmpInst::ICMP_SGE:
10173     std::swap(LHS, RHS);
10174     LLVM_FALLTHROUGH;
10175   case ICmpInst::ICMP_SLE:
10176     return
10177         // min(A, ...) <= A
10178         IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
10179         // A <= max(A, ...)
10180         IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
10181
10182   case ICmpInst::ICMP_UGE:
10183     std::swap(LHS, RHS);
10184     LLVM_FALLTHROUGH;
10185   case ICmpInst::ICMP_ULE:
10186     return
10187         // min(A, ...) <= A
10188         IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
10189         // A <= max(A, ...)
10190         IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
10191   }
10192
10193   llvm_unreachable("covered switch fell through?!");
10194 }
10195
10196 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
10197                                              const SCEV *LHS, const SCEV *RHS,
10198                                              const SCEV *FoundLHS,
10199                                              const SCEV *FoundRHS,
10200                                              unsigned Depth) {
10201   assert(getTypeSizeInBits(LHS->getType()) ==
10202          getTypeSizeInBits(RHS->getType()) &&
10203          "LHS and RHS have different sizes?");
10204   assert(getTypeSizeInBits(FoundLHS->getType()) ==
10205          getTypeSizeInBits(FoundRHS->getType()) &&
10206          "FoundLHS and FoundRHS have different sizes?");
10207   // We want to avoid hurting the compile time with the analysis of overly
10208   // large trees.
10209   if (Depth > MaxSCEVOperationsImplicationDepth)
10210     return false;
10211   // We only want to work with the ICMP_SGT comparison so far.
10212   // TODO: Extend to ICMP_UGT?
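  // (Illustrative: `a s< b` states the same fact as `b s> a`, so an SLT query
  // is flipped into the canonical SGT form by swapping both pairs below.)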
10212   if (Pred == ICmpInst::ICMP_SLT) {
10213     Pred = ICmpInst::ICMP_SGT;
10214     std::swap(LHS, RHS);
10215     std::swap(FoundLHS, FoundRHS);
10216   }
10217   if (Pred != ICmpInst::ICMP_SGT)
10218     return false;
10219
10220   auto GetOpFromSExt = [&](const SCEV *S) {
10221     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
10222       return Ext->getOperand();
10223     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
10224     // the constant in some cases.
10225     return S;
10226   };
10227
10228   // Acquire values from extensions.
10229   auto *OrigLHS = LHS;
10230   auto *OrigFoundLHS = FoundLHS;
10231   LHS = GetOpFromSExt(LHS);
10232   FoundLHS = GetOpFromSExt(FoundLHS);
10233
10234   // Can the SGT predicate be proved trivially or using the found context?
10235   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
10236     return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
10237            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
10238                                   FoundRHS, Depth + 1);
10239   };
10240
10241   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10242     // We want to avoid creation of any new non-constant SCEV. Since we are
10243     // going to compare the operands to RHS, we should be certain that we don't
10244     // need any size extensions for this. So let's decline all cases when the
10245     // sizes of types of LHS and RHS do not match.
10246     // TODO: Maybe try to get RHS from sext to catch more cases?
10247     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
10248       return false;
10249
10250     // Should not overflow.
10251     if (!LHSAddExpr->hasNoSignedWrap())
10252       return false;
10253
10254     auto *LL = LHSAddExpr->getOperand(0);
10255     auto *LR = LHSAddExpr->getOperand(1);
10256     auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
10257
10258     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
10259     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
10260       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
10261     };
10262     // Try to prove the following rule:
10263     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
10264     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
10265     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
10266       return true;
10267   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
10268     Value *LL, *LR;
10269     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
10270
10271     using namespace llvm::PatternMatch;
10272
10273     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
10274       // Rules for division.
10275       // We are going to perform some comparisons with Denominator and its
10276       // derivative expressions. In the general case, creating a SCEV for it may
10277       // lead to a complex analysis of the entire graph, and in particular it
10278       // can request trip count recalculation for the same loop. This would
10279       // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
10280       // this, we only want to create SCEVs that are constants in this section.
10281       // So we bail if Denominator is not a constant.
10282       if (!isa<ConstantInt>(LR))
10283         return false;
10284
10285       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
10286
10287       // We want to make sure that LHS = FoundLHS / Denominator. If so, then a
10288       // SCEV for the numerator already exists and matches with FoundLHS.
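      //
      // E.g. (illustrative): LHS is `%q = sdiv %n, 4`; the getExistingSCEV
      // lookup below only succeeds if a SCEV for %n was already created, and
      // we then require that SCEV to match FoundLHS (via HasSameValue).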
10289       auto *Numerator = getExistingSCEV(LL);
10290       if (!Numerator || Numerator->getType() != FoundLHS->getType())
10291         return false;
10292
10293       // Make sure that the numerator matches with FoundLHS and the denominator
10294       // is positive.
10295       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
10296         return false;
10297
10298       auto *DTy = Denominator->getType();
10299       auto *FRHSTy = FoundRHS->getType();
10300       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
10301         // One of the types is a pointer and the other one is not. We cannot
10302         // extend them properly to a wider type, so let us just reject this case.
10303         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
10304         // to avoid this check.
10305         return false;
10306
10307       // Given that:
10308       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
10309       auto *WTy = getWiderType(DTy, FRHSTy);
10310       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
10311       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
10312
10313       // Try to prove the following rule:
10314       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
10315       // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
10316       // divide it by Denominator < 4, we will have at least 1.
10317       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
10318       if (isKnownNonPositive(RHS) &&
10319           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
10320         return true;
10321
10322       // Try to prove the following rule:
10323       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
10324       // For example, given that FoundLHS > -3, FoundLHS is at least -2.
10325       // If we divide it by Denominator > 2, then:
10326       // 1. If FoundLHS is negative, then the result is 0.
10327       // 2. If FoundLHS is non-negative, then the result is non-negative.
10328       // Either way, the result is non-negative.
10329       auto *MinusOne = getNegativeSCEV(getOne(WTy));
10330       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
10331       if (isKnownNegative(RHS) &&
10332           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
10333         return true;
10334     }
10335   }
10336
10337   // If our expression contained SCEVUnknown Phis, and we split it down and
10338   // now need to prove something for them, try to prove the predicate for all
10339   // possible incoming values of those Phis.
10340   if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
10341     return true;
10342
10343   return false;
10344 }
10345
10346 bool
10347 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
10348                                                  const SCEV *LHS, const SCEV *RHS) {
10349   return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
10350          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
10351          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
10352          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
10353 }
10354
10355 bool
10356 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
10357                                              const SCEV *LHS, const SCEV *RHS,
10358                                              const SCEV *FoundLHS,
10359                                              const SCEV *FoundRHS) {
10360   switch (Pred) {
10361   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
10362   case ICmpInst::ICMP_EQ:
10363   case ICmpInst::ICMP_NE:
10364     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
10365       return true;
10366     break;
10367   case ICmpInst::ICMP_SLT:
10368   case ICmpInst::ICMP_SLE:
10369     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
10370         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
10371       return true;
10372     break;
10373   case ICmpInst::ICMP_SGT:
10374   case ICmpInst::ICMP_SGE:
10375     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
10376         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
10377       return true;
10378     break;
10379   case ICmpInst::ICMP_ULT:
10380   case ICmpInst::ICMP_ULE:
10381     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
10382         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
10383       return true;
10384     break;
10385   case ICmpInst::ICMP_UGT:
10386   case ICmpInst::ICMP_UGE:
10387     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
10388         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10389       return true;
10390     break;
10391   }
10392
10393   // Maybe it can be proved via operations?
10394   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10395     return true;
10396
10397   return false;
10398 }
10399
10400 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10401                                                      const SCEV *LHS,
10402                                                      const SCEV *RHS,
10403                                                      const SCEV *FoundLHS,
10404                                                      const SCEV *FoundRHS) {
10405   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10406     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10407     // reduce the compile time impact of this optimization.
10408     return false;
10409
10410   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
10411   if (!Addend)
10412     return false;
10413
10414   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10415
10416   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
10417   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
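  //
  // Worked example (illustrative): with Pred = ICMP_ULT and FoundRHS = 10,
  // FoundLHSRange = [0, 10). If Addend = 5, then LHSRange = [5, 15), and for
  // RHS = 20 the satisfying region of `LHS u< 20` is [0, 20), which contains
  // [5, 15), so the implication holds.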
10418 ConstantRange FoundLHSRange = 10419 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS); 10420 10421 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 10422 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 10423 10424 // We can also compute the range of values for `LHS` that satisfy the 10425 // consequent, "`LHS` `Pred` `RHS`": 10426 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 10427 ConstantRange SatisfyingLHSRange = 10428 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS); 10429 10430 // The antecedent implies the consequent if every value of `LHS` that 10431 // satisfies the antecedent also satisfies the consequent. 10432 return SatisfyingLHSRange.contains(LHSRange); 10433 } 10434 10435 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 10436 bool IsSigned, bool NoWrap) { 10437 assert(isKnownPositive(Stride) && "Positive stride expected!"); 10438 10439 if (NoWrap) return false; 10440 10441 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10442 const SCEV *One = getOne(Stride->getType()); 10443 10444 if (IsSigned) { 10445 APInt MaxRHS = getSignedRangeMax(RHS); 10446 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 10447 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10448 10449 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 10450 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 10451 } 10452 10453 APInt MaxRHS = getUnsignedRangeMax(RHS); 10454 APInt MaxValue = APInt::getMaxValue(BitWidth); 10455 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10456 10457 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 10458 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 10459 } 10460 10461 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 10462 bool IsSigned, bool NoWrap) { 10463 if (NoWrap) return false; 10464 10465 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 10466 const SCEV *One = getOne(Stride->getType()); 10467 10468 if (IsSigned) { 10469 APInt MinRHS = getSignedRangeMin(RHS); 10470 APInt MinValue = APInt::getSignedMinValue(BitWidth); 10471 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 10472 10473 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 10474 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10475 } 10476 10477 APInt MinRHS = getUnsignedRangeMin(RHS); 10478 APInt MinValue = APInt::getMinValue(BitWidth); 10479 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10480 10481 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10482 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10483 } 10484 10485 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10486 bool Equality) { 10487 const SCEV *One = getOne(Step->getType()); 10488 Delta = Equality ? getAddExpr(Delta, Step) 10489 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10490 return getUDivExpr(Delta, Step); 10491 } 10492 10493 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10494 const SCEV *Stride, 10495 const SCEV *End, 10496 unsigned BitWidth, 10497 bool IsSigned) { 10498 10499 assert(!isKnownNonPositive(Stride) && 10500 "Stride is expected strictly positive!"); 10501 // Calculate the maximum backedge count based on the range of values 10502 // permitted by Start, End, and Stride. 10503 const SCEV *MaxBECount; 10504 APInt MinStart = 10505 IsSigned ? 
getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
10506
10507   APInt StrideForMaxBECount =
10508       IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
10509
10510   // We already know that the stride is positive, so we paper over conservatism
10511   // in our range computation by forcing StrideForMaxBECount to be at least one.
10512   // In theory this is unnecessary, but we expect MaxBECount to be a
10513   // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
10514   // is nothing to constant fold it to).
10515   APInt One(BitWidth, 1, IsSigned);
10516   StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);
10517
10518   APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
10519                             : APInt::getMaxValue(BitWidth);
10520   APInt Limit = MaxValue - (StrideForMaxBECount - 1);
10521
10522   // Although End can be a MAX expression we estimate MaxEnd considering only
10523   // the case End = RHS of the loop termination condition. This is safe because
10524   // in the other case (End - Start) is zero, leading to a zero maximum backedge
10525   // taken count.
10526   APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
10527                           : APIntOps::umin(getUnsignedRangeMax(End), Limit);
10528
10529   MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
10530                               getConstant(StrideForMaxBECount) /* Step */,
10531                               false /* Equality */);
10532
10533   return MaxBECount;
10534 }
10535
10536 ScalarEvolution::ExitLimit
10537 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
10538                                   const Loop *L, bool IsSigned,
10539                                   bool ControlsExit, bool AllowPredicates) {
10540   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10541
10542   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
10543   bool PredicatedIV = false;
10544
10545   if (!IV && AllowPredicates) {
10546     // Try to make this an AddRec using runtime tests, in the first X
10547     // iterations of this loop, where X is the SCEV expression found by the
10548     // algorithm below.
10549     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10550     PredicatedIV = true;
10551   }
10552
10553   // Avoid weird loops.
10554   if (!IV || IV->getLoop() != L || !IV->isAffine())
10555     return getCouldNotCompute();
10556
10557   bool NoWrap = ControlsExit &&
10558                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
10559
10560   const SCEV *Stride = IV->getStepRecurrence(*this);
10561
10562   bool PositiveStride = isKnownPositive(Stride);
10563
10564   // Avoid negative or zero stride values.
10565   if (!PositiveStride) {
10566     // We can compute the correct backedge taken count for loops with unknown
10567     // strides if we can prove that the loop is not an infinite loop with side
10568     // effects. Here's the loop structure we are trying to handle -
10569     //
10570     // i = start
10571     // do {
10572     //   A[i] = i;
10573     //   i += s;
10574     // } while (i < end);
10575     //
10576     // The backedge taken count for such loops is evaluated as -
10577     // (max(end, start + stride) - start - 1) /u stride
10578     //
10579     // The additional preconditions that we need to check to prove correctness
10580     // of the above formula are as follows -
10581     //
10582     // a) IV is either nuw or nsw depending upon signedness (indicated by the
10583     //    NoWrap flag).
10584     // b) the loop has a single exit and no side effects.
10585     //
10586     //
10587     // Precondition a) implies that if the stride is negative, this is a single
10588     // trip loop. The backedge taken count formula reduces to zero in this case.
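    // As an illustration of the formula above (hypothetical numbers): with
    // start = 0, s = 3 and end = 10, the body runs for i = 0, 3, 6, 9 and the
    // backedge is taken three times; the formula gives
    // (max(10, 0 + 3) - 0 - 1) /u 3 = 3.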
10589     //
10590     // Precondition b) implies that the unknown stride cannot be zero,
10591     // otherwise we have UB.
10592     //
10593     // The positive stride case is the same as isKnownPositive(Stride) returning
10594     // true (original behavior of the function).
10595     //
10596     // We want to make sure that the stride is truly unknown as there are edge
10597     // cases where ScalarEvolution propagates no wrap flags to the
10598     // post-increment/decrement IV even though the increment/decrement operation
10599     // itself is wrapping. The computed backedge taken count may be wrong in
10600     // such cases. This is prevented by checking that the stride is not known to
10601     // be either positive or non-positive. For example, no wrap flags are
10602     // propagated to the post-increment IV of this loop with a trip count of 2 -
10603     //
10604     // unsigned char i;
10605     // for(i=127; i<128; i+=129)
10606     //   A[i] = i;
10607     //
10608     if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
10609         !loopHasNoSideEffects(L))
10610       return getCouldNotCompute();
10611   } else if (!Stride->isOne() &&
10612              doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
10613     // Avoid proven overflow cases: this will ensure that the backedge taken
10614     // count will not generate any unsigned overflow. Relaxed no-overflow
10615     // conditions exploit NoWrapFlags, allowing us to optimize in the presence
10616     // of undefined behavior, as in C.
10617     return getCouldNotCompute();
10618
10619   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
10620                                       : ICmpInst::ICMP_ULT;
10621   const SCEV *Start = IV->getStart();
10622   const SCEV *End = RHS;
10623   // When the RHS is not invariant, we do not know the end bound of the loop and
10624   // cannot calculate the ExactBECount needed by ExitLimit. However, we can
10625   // calculate the MaxBECount, given the start, stride and max value for the end
10626   // bound of the loop (RHS), and the fact that IV does not overflow (which is
10627   // checked above).
10628   if (!isLoopInvariant(RHS, L)) {
10629     const SCEV *MaxBECount = computeMaxBECountForLT(
10630         Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
10631     return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
10632                      false /*MaxOrZero*/, Predicates);
10633   }
10634   // If the backedge is taken at least once, then it will be taken
10635   // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
10636   // is the LHS value of the less-than comparison the first time it is evaluated
10637   // and End is the RHS.
10638   const SCEV *BECountIfBackedgeTaken =
10639       computeBECount(getMinusSCEV(End, Start), Stride, false);
10640   // If the loop entry is guarded by the result of the backedge test of the
10641   // first loop iteration, then we know the backedge will be taken at least
10642   // once and so the backedge taken count is as above. If not, then we use the
10643   // expression (max(End,Start)-Start)/Stride to describe the backedge count:
10644   // if the backedge is taken at least once, max(End,Start) is End and so the
10645   // result is as above; if not, max(End,Start) is Start and so we get a
10646   // backedge count of zero.
10647   const SCEV *BECount;
10648   if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
10649     BECount = BECountIfBackedgeTaken;
10650   else {
10651     End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
10652     BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
10653   }
10654
10655   const SCEV *MaxBECount;
10656   bool MaxOrZero = false;
10657   if (isa<SCEVConstant>(BECount))
10658     MaxBECount = BECount;
10659   else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
10660     // If we know exactly how many times the backedge will be taken if it's
10661     // taken at least once, then the backedge count will either be that or
10662     // zero.
10663     MaxBECount = BECountIfBackedgeTaken;
10664     MaxOrZero = true;
10665   } else {
10666     MaxBECount = computeMaxBECountForLT(
10667         Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
10668   }
10669
10670   if (isa<SCEVCouldNotCompute>(MaxBECount) &&
10671       !isa<SCEVCouldNotCompute>(BECount))
10672     MaxBECount = getConstant(getUnsignedRangeMax(BECount));
10673
10674   return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
10675 }
10676
10677 ScalarEvolution::ExitLimit
10678 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
10679                                      const Loop *L, bool IsSigned,
10680                                      bool ControlsExit, bool AllowPredicates) {
10681   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
10682   // We handle only IV > Invariant.
10683   if (!isLoopInvariant(RHS, L))
10684     return getCouldNotCompute();
10685
10686   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
10687   if (!IV && AllowPredicates)
10688     // Try to make this an AddRec using runtime tests, in the first X
10689     // iterations of this loop, where X is the SCEV expression found by the
10690     // algorithm below.
10691     IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
10692
10693   // Avoid weird loops.
10694   if (!IV || IV->getLoop() != L || !IV->isAffine())
10695     return getCouldNotCompute();
10696
10697   bool NoWrap = ControlsExit &&
10698                 IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
10699
10700   const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
10701
10702   // Avoid negative or zero stride values.
10703   if (!isKnownPositive(Stride))
10704     return getCouldNotCompute();
10705
10706   // Avoid proven overflow cases: this will ensure that the backedge taken
10707   // count will not generate any unsigned overflow. Relaxed no-overflow
10708   // conditions exploit NoWrapFlags, allowing us to optimize in the presence
10709   // of undefined behavior, as in C.
10710   if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
10711     return getCouldNotCompute();
10712
10713   ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
10714                                       : ICmpInst::ICMP_UGT;
10715
10716   const SCEV *Start = IV->getStart();
10717   const SCEV *End = RHS;
10718   if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
10719     End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
10720
10721   const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
10722
10723   APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
10724                             : getUnsignedRangeMax(Start);
10725
10726   APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
10727                              : getUnsignedRangeMin(Stride);
10728
10729   unsigned BitWidth = getTypeSizeInBits(LHS->getType());
10730   APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
10731                          : APInt::getMinValue(BitWidth) + (MinStride - 1);
10732
10733   // Although End can be a MIN expression we estimate MinEnd considering only
10734   // the case End = RHS.
This is safe because in the other case (Start - End) 10735 // is zero, leading to a zero maximum backedge taken count. 10736 APInt MinEnd = 10737 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10738 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10739 10740 10741 const SCEV *MaxBECount = getCouldNotCompute(); 10742 if (isa<SCEVConstant>(BECount)) 10743 MaxBECount = BECount; 10744 else 10745 MaxBECount = computeBECount(getConstant(MaxStart - MinEnd), 10746 getConstant(MinStride), false); 10747 10748 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10749 MaxBECount = BECount; 10750 10751 return ExitLimit(BECount, MaxBECount, false, Predicates); 10752 } 10753 10754 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10755 ScalarEvolution &SE) const { 10756 if (Range.isFullSet()) // Infinite loop. 10757 return SE.getCouldNotCompute(); 10758 10759 // If the start is a non-zero constant, shift the range to simplify things. 10760 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10761 if (!SC->getValue()->isZero()) { 10762 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10763 Operands[0] = SE.getZero(SC->getType()); 10764 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10765 getNoWrapFlags(FlagNW)); 10766 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10767 return ShiftedAddRec->getNumIterationsInRange( 10768 Range.subtract(SC->getAPInt()), SE); 10769 // This is strange and shouldn't happen. 10770 return SE.getCouldNotCompute(); 10771 } 10772 10773 // The only time we can solve this is when we have all constant indices. 10774 // Otherwise, we cannot determine the overflow conditions. 10775 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10776 return SE.getCouldNotCompute(); 10777 10778 // Okay at this point we know that all elements of the chrec are constants and 10779 // that the start element is zero. 10780 10781 // First check to see if the range contains zero. If not, the first 10782 // iteration exits. 10783 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10784 if (!Range.contains(APInt(BitWidth, 0))) 10785 return SE.getZero(getType()); 10786 10787 if (isAffine()) { 10788 // If this is an affine expression then we have this situation: 10789 // Solve {0,+,A} in Range === Ax in Range 10790 10791 // We know that zero is in the range. If A is positive then we know that 10792 // the upper value of the range must be the first possible exit value. 10793 // If A is negative then the lower of the range is the last possible loop 10794 // value. Also note that we already checked for a full range. 10795 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10796 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10797 10798 // The exit value should be (End+A)/A. 10799 APInt ExitVal = (End + A).udiv(A); 10800 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10801 10802 // Evaluate at the exit value. If we really did fall out of the valid 10803 // range, then we computed our trip count, otherwise wrap around or other 10804 // things must have happened. 10805 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10806 if (Range.contains(Val->getValue())) 10807 return SE.getCouldNotCompute(); // Something strange happened 10808 10809 // Ensure that the previous value is in the range. This is a sanity check. 
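  // Worked example (hypothetical values): for {0,+,3} and Range = [0, 10),
  // End is 9 and ExitVal is (9 + 3) /u 3 = 4. The chrec evaluates to 12 at
  // iteration 4 (outside Range) and to 9 at iteration 3 (still inside), so
  // the result is an exit count of 4.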
10810   assert(Range.contains(
10811       EvaluateConstantChrecAtConstant(this,
10812           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
10813       "Linear scev computation is off in a bad way!");
10814     return SE.getConstant(ExitValue);
10815   }
10816
10817   if (isQuadratic()) {
10818     if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
10819       return SE.getConstant(S.getValue());
10820   }
10821
10822   return SE.getCouldNotCompute();
10823 }
10824
10825 const SCEVAddRecExpr *
10826 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
10827   assert(getNumOperands() > 1 && "AddRec with zero step?");
10828   // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
10829   // but in this case we cannot guarantee that the value returned will be an
10830   // AddRec because SCEV does not have a fixed point where it stops
10831   // simplification: it is legal to return ({rec1} + {rec2}). For example, it
10832   // may happen if we reach the arithmetic depth limit while simplifying. So we
10833   // construct the returned value explicitly.
10834   SmallVector<const SCEV *, 3> Ops;
10835   // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
10836   // (this + Step) is {A+B,+,B+C,+,...,+,N}.
10837   for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
10838     Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
10839   // We know that the last operand is not a constant zero (otherwise it would
10840   // have been popped out earlier). This guarantees us that if the result has
10841   // the same last operand, then it will also not be popped out, meaning that
10842   // the returned value will be an AddRec.
10843   const SCEV *Last = getOperand(getNumOperands() - 1);
10844   assert(!Last->isZero() && "Recurrence with zero step?");
10845   Ops.push_back(Last);
10846   return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
10847                                                SCEV::FlagAnyWrap));
10848 }
10849
10850 // Return true when S contains at least one undef value.
10851 static inline bool containsUndefs(const SCEV *S) {
10852   return SCEVExprContains(S, [](const SCEV *S) {
10853     if (const auto *SU = dyn_cast<SCEVUnknown>(S))
10854       return isa<UndefValue>(SU->getValue());
10855     return false;
10856   });
10857 }
10858
10859 namespace {
10860
10861 // Collect all steps of SCEV expressions.
10862 struct SCEVCollectStrides {
10863   ScalarEvolution &SE;
10864   SmallVectorImpl<const SCEV *> &Strides;
10865
10866   SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
10867       : SE(SE), Strides(S) {}
10868
10869   bool follow(const SCEV *S) {
10870     if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
10871       Strides.push_back(AR->getStepRecurrence(SE));
10872     return true;
10873   }
10874
10875   bool isDone() const { return false; }
10876 };
10877
10878 // Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions.
10879 struct SCEVCollectTerms {
10880   SmallVectorImpl<const SCEV *> &Terms;
10881
10882   SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}
10883
10884   bool follow(const SCEV *S) {
10885     if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
10886         isa<SCEVSignExtendExpr>(S)) {
10887       if (!containsUndefs(S))
10888         Terms.push_back(S);
10889
10890       // Stop recursion: once we collected a term, do not walk its operands.
10891       return false;
10892     }
10893
10894     // Keep looking.
10895     return true;
10896   }
10897
10898   bool isDone() const { return false; }
10899 };
10900
10901 // Check if a SCEV contains an AddRecExpr.
10902 struct SCEVHasAddRec {
10903   bool &ContainsAddRec;
10904
10905   SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
10906     ContainsAddRec = false;
10907   }
10908
10909   bool follow(const SCEV *S) {
10910     if (isa<SCEVAddRecExpr>(S)) {
10911       ContainsAddRec = true;
10912
10913       // Stop recursion: we found an AddRec, do not walk its operands.
10914       return false;
10915     }
10916
10917     // Keep looking.
10918     return true;
10919   }
10920
10921   bool isDone() const { return false; }
10922 };
10923
10924 // Find factors that are multiplied with an expression that (possibly as a
10925 // subexpression) contains an AddRecExpr. In the expression:
10926 //
10927 //   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
10928 //
10929 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
10930 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
10931 // parameters as they form a product with an induction variable.
10932 //
10933 // This collector expects all array size parameters to be in the same MulExpr.
10934 // It might be necessary to later add support for collecting parameters that are
10935 // spread over different nested MulExpr.
10936 struct SCEVCollectAddRecMultiplies {
10937   SmallVectorImpl<const SCEV *> &Terms;
10938   ScalarEvolution &SE;
10939
10940   SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
10941       : Terms(T), SE(SE) {}
10942
10943   bool follow(const SCEV *S) {
10944     if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
10945       bool HasAddRec = false;
10946       SmallVector<const SCEV *, 0> Operands;
10947       for (auto Op : Mul->operands()) {
10948         const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
10949         if (Unknown && !isa<CallInst>(Unknown->getValue())) {
10950           Operands.push_back(Op);
10951         } else if (Unknown) {
10952           HasAddRec = true;
10953         } else {
10954           bool ContainsAddRec;
10955           SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
10956           visitAll(Op, ContainsAddRecVisitor);
10957           HasAddRec |= ContainsAddRec;
10958         }
10959       }
10960       if (Operands.empty())
10961         return true;
10962
10963       if (!HasAddRec)
10964         return false;
10965
10966       Terms.push_back(SE.getMulExpr(Operands));
10967       // Stop recursion: once we collected a term, do not walk its operands.
10968       return false;
10969     }
10970
10971     // Keep looking.
10972     return true;
10973   }
10974
10975   bool isDone() const { return false; }
10976 };
10977
10978 } // end anonymous namespace
10979
10980 /// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
10981 /// in two places:
10982 ///   1) The strides of AddRec expressions.
10983 ///   2) Unknowns that are multiplied with AddRec expressions.
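/// For example (an illustrative SCEV, not taken from the code below): in
/// {%a,+,(4 * %n)}<%loop> the stride (4 * %n) is collected via 1), while in
/// %m * {0,+,1}<%loop> the multiplier %m is collected via 2).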
10984 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 10985 SmallVectorImpl<const SCEV *> &Terms) { 10986 SmallVector<const SCEV *, 4> Strides; 10987 SCEVCollectStrides StrideCollector(*this, Strides); 10988 visitAll(Expr, StrideCollector); 10989 10990 LLVM_DEBUG({ 10991 dbgs() << "Strides:\n"; 10992 for (const SCEV *S : Strides) 10993 dbgs() << *S << "\n"; 10994 }); 10995 10996 for (const SCEV *S : Strides) { 10997 SCEVCollectTerms TermCollector(Terms); 10998 visitAll(S, TermCollector); 10999 } 11000 11001 LLVM_DEBUG({ 11002 dbgs() << "Terms:\n"; 11003 for (const SCEV *T : Terms) 11004 dbgs() << *T << "\n"; 11005 }); 11006 11007 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11008 visitAll(Expr, MulCollector); 11009 } 11010 11011 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11012 SmallVectorImpl<const SCEV *> &Terms, 11013 SmallVectorImpl<const SCEV *> &Sizes) { 11014 int Last = Terms.size() - 1; 11015 const SCEV *Step = Terms[Last]; 11016 11017 // End of recursion. 11018 if (Last == 0) { 11019 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11020 SmallVector<const SCEV *, 2> Qs; 11021 for (const SCEV *Op : M->operands()) 11022 if (!isa<SCEVConstant>(Op)) 11023 Qs.push_back(Op); 11024 11025 Step = SE.getMulExpr(Qs); 11026 } 11027 11028 Sizes.push_back(Step); 11029 return true; 11030 } 11031 11032 for (const SCEV *&Term : Terms) { 11033 // Normalize the terms before the next call to findArrayDimensionsRec. 11034 const SCEV *Q, *R; 11035 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11036 11037 // Bail out when GCD does not evenly divide one of the terms. 11038 if (!R->isZero()) 11039 return false; 11040 11041 Term = Q; 11042 } 11043 11044 // Remove all SCEVConstants. 11045 Terms.erase( 11046 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11047 Terms.end()); 11048 11049 if (Terms.size() > 0) 11050 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11051 return false; 11052 11053 Sizes.push_back(Step); 11054 return true; 11055 } 11056 11057 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11058 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11059 for (const SCEV *T : Terms) 11060 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 11061 return true; 11062 return false; 11063 } 11064 11065 // Return the number of product terms in S. 11066 static inline int numberOfTerms(const SCEV *S) { 11067 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11068 return Expr->getNumOperands(); 11069 return 1; 11070 } 11071 11072 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11073 if (isa<SCEVConstant>(T)) 11074 return nullptr; 11075 11076 if (isa<SCEVUnknown>(T)) 11077 return T; 11078 11079 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11080 SmallVector<const SCEV *, 2> Factors; 11081 for (const SCEV *Op : M->operands()) 11082 if (!isa<SCEVConstant>(Op)) 11083 Factors.push_back(Op); 11084 11085 return SE.getMulExpr(Factors); 11086 } 11087 11088 return T; 11089 } 11090 11091 /// Return the size of an element read or written by Inst. 
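/// For example (illustrative): for a "store i32 %v, i32* %p", this is the
/// SCEV of sizeof(i32), i.e. a constant 4 bytes.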
11092 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11093 Type *Ty; 11094 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11095 Ty = Store->getValueOperand()->getType(); 11096 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11097 Ty = Load->getType(); 11098 else 11099 return nullptr; 11100 11101 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11102 return getSizeOfExpr(ETy, Ty); 11103 } 11104 11105 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11106 SmallVectorImpl<const SCEV *> &Sizes, 11107 const SCEV *ElementSize) { 11108 if (Terms.size() < 1 || !ElementSize) 11109 return; 11110 11111 // Early return when Terms do not contain parameters: we do not delinearize 11112 // non parametric SCEVs. 11113 if (!containsParameters(Terms)) 11114 return; 11115 11116 LLVM_DEBUG({ 11117 dbgs() << "Terms:\n"; 11118 for (const SCEV *T : Terms) 11119 dbgs() << *T << "\n"; 11120 }); 11121 11122 // Remove duplicates. 11123 array_pod_sort(Terms.begin(), Terms.end()); 11124 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11125 11126 // Put larger terms first. 11127 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11128 return numberOfTerms(LHS) > numberOfTerms(RHS); 11129 }); 11130 11131 // Try to divide all terms by the element size. If term is not divisible by 11132 // element size, proceed with the original term. 11133 for (const SCEV *&Term : Terms) { 11134 const SCEV *Q, *R; 11135 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11136 if (!Q->isZero()) 11137 Term = Q; 11138 } 11139 11140 SmallVector<const SCEV *, 4> NewTerms; 11141 11142 // Remove constant factors. 11143 for (const SCEV *T : Terms) 11144 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11145 NewTerms.push_back(NewT); 11146 11147 LLVM_DEBUG({ 11148 dbgs() << "Terms after sorting:\n"; 11149 for (const SCEV *T : NewTerms) 11150 dbgs() << *T << "\n"; 11151 }); 11152 11153 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11154 Sizes.clear(); 11155 return; 11156 } 11157 11158 // The last element to be pushed into Sizes is the size of an element. 11159 Sizes.push_back(ElementSize); 11160 11161 LLVM_DEBUG({ 11162 dbgs() << "Sizes:\n"; 11163 for (const SCEV *S : Sizes) 11164 dbgs() << *S << "\n"; 11165 }); 11166 } 11167 11168 void ScalarEvolution::computeAccessFunctions( 11169 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11170 SmallVectorImpl<const SCEV *> &Sizes) { 11171 // Early exit in case this SCEV is not an affine multivariate function. 11172 if (Sizes.empty()) 11173 return; 11174 11175 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11176 if (!AR->isAffine()) 11177 return; 11178 11179 const SCEV *Res = Expr; 11180 int Last = Sizes.size() - 1; 11181 for (int i = Last; i >= 0; i--) { 11182 const SCEV *Q, *R; 11183 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11184 11185 LLVM_DEBUG({ 11186 dbgs() << "Res: " << *Res << "\n"; 11187 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11188 dbgs() << "Res divided by Sizes[i]:\n"; 11189 dbgs() << "Quotient: " << *Q << "\n"; 11190 dbgs() << "Remainder: " << *R << "\n"; 11191 }); 11192 11193 Res = Q; 11194 11195 // Do not record the last subscript corresponding to the size of elements in 11196 // the array. 11197 if (i == Last) { 11198 11199 // Bail out if the remainder is too complex. 
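      // (A remainder that still contains an AddRec means the loop-varying
      // part of the access did not divide evenly by the element size, so the
      // computed subscripts would be meaningless; give up in that case.)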
11200       if (isa<SCEVAddRecExpr>(R)) {
11201         Subscripts.clear();
11202         Sizes.clear();
11203         return;
11204       }
11205
11206       continue;
11207     }
11208
11209     // Record the access function for the current subscript.
11210     Subscripts.push_back(R);
11211   }
11212
11213   // Also push in last position the remainder of the last division: it will be
11214   // the access function of the innermost dimension.
11215   Subscripts.push_back(Res);
11216
11217   std::reverse(Subscripts.begin(), Subscripts.end());
11218
11219   LLVM_DEBUG({
11220     dbgs() << "Subscripts:\n";
11221     for (const SCEV *S : Subscripts)
11222       dbgs() << *S << "\n";
11223   });
11224 }
11225
11226 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
11227 /// sizes of an array access. The remainder of the delinearization is the
11228 /// offset start of the array. The SCEV->delinearize algorithm computes the
11229 /// multiples of SCEV coefficients; this is a pattern matching of the
11230 /// subexpressions in the stride and base of a SCEV, corresponding to the
11231 /// computation of a GCD (greatest common divisor) of base and stride. When
11232 /// SCEV->delinearize fails, it returns the SCEV unchanged.
11233 ///
11234 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
11235 ///
11236 /// void foo(long n, long m, long o, double A[n][m][o]) {
11237 ///
11238 ///   for (long i = 0; i < n; i++)
11239 ///     for (long j = 0; j < m; j++)
11240 ///       for (long k = 0; k < o; k++)
11241 ///         A[i][j][k] = 1.0;
11242 /// }
11243 ///
11244 /// the delinearization input is the following AddRec SCEV:
11245 ///
11246 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
11247 ///
11248 /// From this SCEV, we are able to say that the base offset of the access is %A
11249 /// because it appears as an offset that does not divide any of the strides in
11250 /// the loops:
11251 ///
11252 /// CHECK: Base offset: %A
11253 ///
11254 /// and then SCEV->delinearize determines the size of some of the dimensions of
11255 /// the array, as these are the multiples relating each stride to the next inner one:
11256 ///
11257 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
11258 ///
11259 /// Note that the outermost dimension remains of UnknownSize because there are
11260 /// no strides that would help identify the size of the last dimension: when
11261 /// the array has been statically allocated, one could compute the size of that
11262 /// dimension by dividing the overall size of the array by the size of the known
11263 /// dimensions: %m * %o * 8.
11264 ///
11265 /// Finally, delinearize provides the access functions for the array reference
11266 /// that corresponds to A[i][j][k] in the above C testcase:
11267 ///
11268 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
11269 ///
11270 /// The testcases check the output of a function pass, DelinearizationPass,
11271 /// that walks through all loads and stores of a function asking for the SCEV of
11272 /// the memory access with respect to all enclosing loops, calling
11273 /// SCEV->delinearize on that and printing the results.
11274 void ScalarEvolution::delinearize(const SCEV *Expr,
11275                                   SmallVectorImpl<const SCEV *> &Subscripts,
11276                                   SmallVectorImpl<const SCEV *> &Sizes,
11277                                   const SCEV *ElementSize) {
11278   // First step: collect parametric terms.
11279 SmallVector<const SCEV *, 4> Terms; 11280 collectParametricTerms(Expr, Terms); 11281 11282 if (Terms.empty()) 11283 return; 11284 11285 // Second step: find subscript sizes. 11286 findArrayDimensions(Terms, Sizes, ElementSize); 11287 11288 if (Sizes.empty()) 11289 return; 11290 11291 // Third step: compute the access functions for each subscript. 11292 computeAccessFunctions(Expr, Subscripts, Sizes); 11293 11294 if (Subscripts.empty()) 11295 return; 11296 11297 LLVM_DEBUG({ 11298 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11299 dbgs() << "ArrayDecl[UnknownSize]"; 11300 for (const SCEV *S : Sizes) 11301 dbgs() << "[" << *S << "]"; 11302 11303 dbgs() << "\nArrayRef"; 11304 for (const SCEV *S : Subscripts) 11305 dbgs() << "[" << *S << "]"; 11306 dbgs() << "\n"; 11307 }); 11308 } 11309 11310 //===----------------------------------------------------------------------===// 11311 // SCEVCallbackVH Class Implementation 11312 //===----------------------------------------------------------------------===// 11313 11314 void ScalarEvolution::SCEVCallbackVH::deleted() { 11315 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11316 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11317 SE->ConstantEvolutionLoopExitValue.erase(PN); 11318 SE->eraseValueFromMap(getValPtr()); 11319 // this now dangles! 11320 } 11321 11322 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11323 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11324 11325 // Forget all the expressions associated with users of the old value, 11326 // so that future queries will recompute the expressions using the new 11327 // value. 11328 Value *Old = getValPtr(); 11329 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11330 SmallPtrSet<User *, 8> Visited; 11331 while (!Worklist.empty()) { 11332 User *U = Worklist.pop_back_val(); 11333 // Deleting the Old value will cause this to dangle. Postpone 11334 // that until everything else is done. 11335 if (U == Old) 11336 continue; 11337 if (!Visited.insert(U).second) 11338 continue; 11339 if (PHINode *PN = dyn_cast<PHINode>(U)) 11340 SE->ConstantEvolutionLoopExitValue.erase(PN); 11341 SE->eraseValueFromMap(U); 11342 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11343 } 11344 // Delete the Old value. 11345 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11346 SE->ConstantEvolutionLoopExitValue.erase(PN); 11347 SE->eraseValueFromMap(Old); 11348 // this now dangles! 11349 } 11350 11351 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11352 : CallbackVH(V), SE(se) {} 11353 11354 //===----------------------------------------------------------------------===// 11355 // ScalarEvolution Class Implementation 11356 //===----------------------------------------------------------------------===// 11357 11358 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11359 AssumptionCache &AC, DominatorTree &DT, 11360 LoopInfo &LI) 11361 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11362 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11363 LoopDispositions(64), BlockDispositions(64) { 11364 // To use guards for proving predicates, we need to scan every instruction in 11365 // relevant basic blocks, and not just terminators. Doing this is a waste of 11366 // time if the IR does not actually contain any calls to 11367 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 
11368 // 11369 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11370 // to _add_ guards to the module when there weren't any before, and wants 11371 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11372 // efficient in lieu of being smart in that rather obscure case. 11373 11374 auto *GuardDecl = F.getParent()->getFunction( 11375 Intrinsic::getName(Intrinsic::experimental_guard)); 11376 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11377 } 11378 11379 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11380 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11381 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11382 ValueExprMap(std::move(Arg.ValueExprMap)), 11383 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11384 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11385 PendingMerges(std::move(Arg.PendingMerges)), 11386 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11387 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11388 PredicatedBackedgeTakenCounts( 11389 std::move(Arg.PredicatedBackedgeTakenCounts)), 11390 ConstantEvolutionLoopExitValue( 11391 std::move(Arg.ConstantEvolutionLoopExitValue)), 11392 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11393 LoopDispositions(std::move(Arg.LoopDispositions)), 11394 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11395 BlockDispositions(std::move(Arg.BlockDispositions)), 11396 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11397 SignedRanges(std::move(Arg.SignedRanges)), 11398 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11399 UniquePreds(std::move(Arg.UniquePreds)), 11400 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11401 LoopUsers(std::move(Arg.LoopUsers)), 11402 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11403 FirstUnknown(Arg.FirstUnknown) { 11404 Arg.FirstUnknown = nullptr; 11405 } 11406 11407 ScalarEvolution::~ScalarEvolution() { 11408 // Iterate through all the SCEVUnknown instances and call their 11409 // destructors, so that they release their references to their values. 11410 for (SCEVUnknown *U = FirstUnknown; U;) { 11411 SCEVUnknown *Tmp = U; 11412 U = U->Next; 11413 Tmp->~SCEVUnknown(); 11414 } 11415 FirstUnknown = nullptr; 11416 11417 ExprValueMap.clear(); 11418 ValueExprMap.clear(); 11419 HasRecMap.clear(); 11420 11421 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11422 // that a loop had multiple computable exits. 
11423 for (auto &BTCI : BackedgeTakenCounts) 11424 BTCI.second.clear(); 11425 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11426 BTCI.second.clear(); 11427 11428 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11429 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11430 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11431 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11432 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11433 } 11434 11435 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11436 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11437 } 11438 11439 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11440 const Loop *L) { 11441 // Print all inner loops first 11442 for (Loop *I : *L) 11443 PrintLoopInfo(OS, SE, I); 11444 11445 OS << "Loop "; 11446 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11447 OS << ": "; 11448 11449 SmallVector<BasicBlock *, 8> ExitBlocks; 11450 L->getExitBlocks(ExitBlocks); 11451 if (ExitBlocks.size() != 1) 11452 OS << "<multiple exits> "; 11453 11454 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11455 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 11456 } else { 11457 OS << "Unpredictable backedge-taken count. "; 11458 } 11459 11460 OS << "\n" 11461 "Loop "; 11462 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11463 OS << ": "; 11464 11465 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 11466 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 11467 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11468 OS << ", actual taken count either this or zero."; 11469 } else { 11470 OS << "Unpredictable max backedge-taken count. "; 11471 } 11472 11473 OS << "\n" 11474 "Loop "; 11475 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11476 OS << ": "; 11477 11478 SCEVUnionPredicate Pred; 11479 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11480 if (!isa<SCEVCouldNotCompute>(PBT)) { 11481 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11482 OS << " Predicates:\n"; 11483 Pred.print(OS, 4); 11484 } else { 11485 OS << "Unpredictable predicated backedge-taken count. "; 11486 } 11487 OS << "\n"; 11488 11489 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11490 OS << "Loop "; 11491 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11492 OS << ": "; 11493 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11494 } 11495 } 11496 11497 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11498 switch (LD) { 11499 case ScalarEvolution::LoopVariant: 11500 return "Variant"; 11501 case ScalarEvolution::LoopInvariant: 11502 return "Invariant"; 11503 case ScalarEvolution::LoopComputable: 11504 return "Computable"; 11505 } 11506 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11507 } 11508 11509 void ScalarEvolution::print(raw_ostream &OS) const { 11510 // ScalarEvolution's implementation of the print method is to print 11511 // out SCEV values of all instructions that are interesting. Doing 11512 // this potentially causes it to create new SCEV objects though, 11513 // which technically conflicts with the const qualifier. This isn't 11514 // observable from outside the class though, so casting away the 11515 // const isn't dangerous. 
11516 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11517 11518 OS << "Classifying expressions for: "; 11519 F.printAsOperand(OS, /*PrintType=*/false); 11520 OS << "\n"; 11521 for (Instruction &I : instructions(F)) 11522 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11523 OS << I << '\n'; 11524 OS << " --> "; 11525 const SCEV *SV = SE.getSCEV(&I); 11526 SV->print(OS); 11527 if (!isa<SCEVCouldNotCompute>(SV)) { 11528 OS << " U: "; 11529 SE.getUnsignedRange(SV).print(OS); 11530 OS << " S: "; 11531 SE.getSignedRange(SV).print(OS); 11532 } 11533 11534 const Loop *L = LI.getLoopFor(I.getParent()); 11535 11536 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11537 if (AtUse != SV) { 11538 OS << " --> "; 11539 AtUse->print(OS); 11540 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11541 OS << " U: "; 11542 SE.getUnsignedRange(AtUse).print(OS); 11543 OS << " S: "; 11544 SE.getSignedRange(AtUse).print(OS); 11545 } 11546 } 11547 11548 if (L) { 11549 OS << "\t\t" "Exits: "; 11550 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11551 if (!SE.isLoopInvariant(ExitValue, L)) { 11552 OS << "<<Unknown>>"; 11553 } else { 11554 OS << *ExitValue; 11555 } 11556 11557 bool First = true; 11558 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11559 if (First) { 11560 OS << "\t\t" "LoopDispositions: { "; 11561 First = false; 11562 } else { 11563 OS << ", "; 11564 } 11565 11566 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11567 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11568 } 11569 11570 for (auto *InnerL : depth_first(L)) { 11571 if (InnerL == L) 11572 continue; 11573 if (First) { 11574 OS << "\t\t" "LoopDispositions: { "; 11575 First = false; 11576 } else { 11577 OS << ", "; 11578 } 11579 11580 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11581 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11582 } 11583 11584 OS << " }"; 11585 } 11586 11587 OS << "\n"; 11588 } 11589 11590 OS << "Determining loop execution counts for: "; 11591 F.printAsOperand(OS, /*PrintType=*/false); 11592 OS << "\n"; 11593 for (Loop *I : LI) 11594 PrintLoopInfo(OS, &SE, I); 11595 } 11596 11597 ScalarEvolution::LoopDisposition 11598 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11599 auto &Values = LoopDispositions[S]; 11600 for (auto &V : Values) { 11601 if (V.getPointer() == L) 11602 return V.getInt(); 11603 } 11604 Values.emplace_back(L, LoopVariant); 11605 LoopDisposition D = computeLoopDisposition(S, L); 11606 auto &Values2 = LoopDispositions[S]; 11607 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11608 if (V.getPointer() == L) { 11609 V.setInt(D); 11610 break; 11611 } 11612 } 11613 return D; 11614 } 11615 11616 ScalarEvolution::LoopDisposition 11617 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11618 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11619 case scConstant: 11620 return LoopInvariant; 11621 case scTruncate: 11622 case scZeroExtend: 11623 case scSignExtend: 11624 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11625 case scAddRecExpr: { 11626 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11627 11628 // If L is the addrec's loop, it's computable. 11629 if (AR->getLoop() == L) 11630 return LoopComputable; 11631 11632 // Add recurrences are never invariant in the function-body (null loop). 11633 if (!L) 11634 return LoopVariant; 11635 11636 // Everything that is not defined at loop entry is variant. 
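    // (In particular, if L's header dominates the AddRec's loop header, the
    // recurrence is not yet defined at L's entry, so it must be treated as
    // variant within L.)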
11637 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11638 return LoopVariant; 11639 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11640 " dominate the contained loop's header?"); 11641 11642 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11643 if (AR->getLoop()->contains(L)) 11644 return LoopInvariant; 11645 11646 // This recurrence is variant w.r.t. L if any of its operands 11647 // are variant. 11648 for (auto *Op : AR->operands()) 11649 if (!isLoopInvariant(Op, L)) 11650 return LoopVariant; 11651 11652 // Otherwise it's loop-invariant. 11653 return LoopInvariant; 11654 } 11655 case scAddExpr: 11656 case scMulExpr: 11657 case scUMaxExpr: 11658 case scSMaxExpr: { 11659 bool HasVarying = false; 11660 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11661 LoopDisposition D = getLoopDisposition(Op, L); 11662 if (D == LoopVariant) 11663 return LoopVariant; 11664 if (D == LoopComputable) 11665 HasVarying = true; 11666 } 11667 return HasVarying ? LoopComputable : LoopInvariant; 11668 } 11669 case scUDivExpr: { 11670 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11671 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11672 if (LD == LoopVariant) 11673 return LoopVariant; 11674 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11675 if (RD == LoopVariant) 11676 return LoopVariant; 11677 return (LD == LoopInvariant && RD == LoopInvariant) ? 11678 LoopInvariant : LoopComputable; 11679 } 11680 case scUnknown: 11681 // All non-instruction values are loop invariant. All instructions are loop 11682 // invariant if they are not contained in the specified loop. 11683 // Instructions are never considered invariant in the function body 11684 // (null loop) because they are defined within the "loop". 11685 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11686 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11687 return LoopInvariant; 11688 case scCouldNotCompute: 11689 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11690 } 11691 llvm_unreachable("Unknown SCEV kind!"); 11692 } 11693 11694 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11695 return getLoopDisposition(S, L) == LoopInvariant; 11696 } 11697 11698 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11699 return getLoopDisposition(S, L) == LoopComputable; 11700 } 11701 11702 ScalarEvolution::BlockDisposition 11703 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11704 auto &Values = BlockDispositions[S]; 11705 for (auto &V : Values) { 11706 if (V.getPointer() == BB) 11707 return V.getInt(); 11708 } 11709 Values.emplace_back(BB, DoesNotDominateBlock); 11710 BlockDisposition D = computeBlockDisposition(S, BB); 11711 auto &Values2 = BlockDispositions[S]; 11712 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11713 if (V.getPointer() == BB) { 11714 V.setInt(D); 11715 break; 11716 } 11717 } 11718 return D; 11719 } 11720 11721 ScalarEvolution::BlockDisposition 11722 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11723 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11724 case scConstant: 11725 return ProperlyDominatesBlock; 11726 case scTruncate: 11727 case scZeroExtend: 11728 case scSignExtend: 11729 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11730 case scAddRecExpr: { 11731 // This uses a "dominates" query instead of "properly dominates" query 11732 // to test for proper dominance too, because the instruction which 11733 // produces the addrec's value is a PHI, and a PHI effectively properly 11734 // dominates its entire containing block. 11735 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11736 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11737 return DoesNotDominateBlock; 11738 11739 // Fall through into SCEVNAryExpr handling. 11740 LLVM_FALLTHROUGH; 11741 } 11742 case scAddExpr: 11743 case scMulExpr: 11744 case scUMaxExpr: 11745 case scSMaxExpr: { 11746 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11747 bool Proper = true; 11748 for (const SCEV *NAryOp : NAry->operands()) { 11749 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11750 if (D == DoesNotDominateBlock) 11751 return DoesNotDominateBlock; 11752 if (D == DominatesBlock) 11753 Proper = false; 11754 } 11755 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11756 } 11757 case scUDivExpr: { 11758 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11759 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11760 BlockDisposition LD = getBlockDisposition(LHS, BB); 11761 if (LD == DoesNotDominateBlock) 11762 return DoesNotDominateBlock; 11763 BlockDisposition RD = getBlockDisposition(RHS, BB); 11764 if (RD == DoesNotDominateBlock) 11765 return DoesNotDominateBlock; 11766 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
11767       ProperlyDominatesBlock : DominatesBlock;
11768   }
11769   case scUnknown:
11770     if (Instruction *I =
11771             dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
11772       if (I->getParent() == BB)
11773         return DominatesBlock;
11774       if (DT.properlyDominates(I->getParent(), BB))
11775         return ProperlyDominatesBlock;
11776       return DoesNotDominateBlock;
11777     }
11778     return ProperlyDominatesBlock;
11779   case scCouldNotCompute:
11780     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
11781   }
11782   llvm_unreachable("Unknown SCEV kind!");
11783 }
11784
11785 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
11786   return getBlockDisposition(S, BB) >= DominatesBlock;
11787 }
11788
11789 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
11790   return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
11791 }
11792
11793 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
11794   return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
11795 }
11796
11797 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
11798   auto IsS = [&](const SCEV *X) { return S == X; };
11799   auto ContainsS = [&](const SCEV *X) {
11800     return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
11801   };
11802   return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
11803 }
11804
11805 void
11806 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
11807   ValuesAtScopes.erase(S);
11808   LoopDispositions.erase(S);
11809   BlockDispositions.erase(S);
11810   UnsignedRanges.erase(S);
11811   SignedRanges.erase(S);
11812   ExprValueMap.erase(S);
11813   HasRecMap.erase(S);
11814   MinTrailingZerosCache.erase(S);
11815
11816   for (auto I = PredicatedSCEVRewrites.begin();
11817        I != PredicatedSCEVRewrites.end();) {
11818     std::pair<const SCEV *, const Loop *> Entry = I->first;
11819     if (Entry.first == S)
11820       PredicatedSCEVRewrites.erase(I++);
11821     else
11822       ++I;
11823   }
11824
11825   auto RemoveSCEVFromBackedgeMap =
11826       [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
11827         for (auto I = Map.begin(), E = Map.end(); I != E;) {
11828           BackedgeTakenInfo &BEInfo = I->second;
11829           if (BEInfo.hasOperand(S, this)) {
11830             BEInfo.clear();
11831             Map.erase(I++);
11832           } else
11833             ++I;
11834         }
11835       };
11836
11837   RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
11838   RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
11839 }
11840
11841 void
11842 ScalarEvolution::getUsedLoops(const SCEV *S,
11843                               SmallPtrSetImpl<const Loop *> &LoopsUsed) {
11844   struct FindUsedLoops {
11845     FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
11846         : LoopsUsed(LoopsUsed) {}
11847     SmallPtrSetImpl<const Loop *> &LoopsUsed;
11848     bool follow(const SCEV *S) {
11849       if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
11850         LoopsUsed.insert(AR->getLoop());
11851       return true;
11852     }
11853
11854     bool isDone() const { return false; }
11855   };
11856
11857   FindUsedLoops F(LoopsUsed);
11858   SCEVTraversal<FindUsedLoops>(F).visitAll(S);
11859 }
11860
11861 void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
11862   SmallPtrSet<const Loop *, 8> LoopsUsed;
11863   getUsedLoops(S, LoopsUsed);
11864   for (auto *L : LoopsUsed)
11865     LoopUsers[L].push_back(S);
11866 }
11867
11868 void ScalarEvolution::verify() const {
11869   ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
11870   ScalarEvolution SE2(F, TLI, AC, DT, LI);
11871
11872   SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
11873
11874   // Maps SCEV expressions from one ScalarEvolution "universe" to another.
ScalarEvolution "universe" to another. 11875 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 11876 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 11877 11878 const SCEV *visitConstant(const SCEVConstant *Constant) { 11879 return SE.getConstant(Constant->getAPInt()); 11880 } 11881 11882 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11883 return SE.getUnknown(Expr->getValue()); 11884 } 11885 11886 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 11887 return SE.getCouldNotCompute(); 11888 } 11889 }; 11890 11891 SCEVMapper SCM(SE2); 11892 11893 while (!LoopStack.empty()) { 11894 auto *L = LoopStack.pop_back_val(); 11895 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 11896 11897 auto *CurBECount = SCM.visit( 11898 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 11899 auto *NewBECount = SE2.getBackedgeTakenCount(L); 11900 11901 if (CurBECount == SE2.getCouldNotCompute() || 11902 NewBECount == SE2.getCouldNotCompute()) { 11903 // NB! This situation is legal, but is very suspicious -- whatever pass 11904 // change the loop to make a trip count go from could not compute to 11905 // computable or vice-versa *should have* invalidated SCEV. However, we 11906 // choose not to assert here (for now) since we don't want false 11907 // positives. 11908 continue; 11909 } 11910 11911 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 11912 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 11913 // not propagate undef aggressively). This means we can (and do) fail 11914 // verification in cases where a transform makes the trip count of a loop 11915 // go from "undef" to "undef+1" (say). The transform is fine, since in 11916 // both cases the loop iterates "undef" times, but SCEV thinks we 11917 // increased the trip count of the loop by 1 incorrectly. 11918 continue; 11919 } 11920 11921 if (SE.getTypeSizeInBits(CurBECount->getType()) > 11922 SE.getTypeSizeInBits(NewBECount->getType())) 11923 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 11924 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 11925 SE.getTypeSizeInBits(NewBECount->getType())) 11926 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 11927 11928 auto *ConstantDelta = 11929 dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount)); 11930 11931 if (ConstantDelta && ConstantDelta->getAPInt() != 0) { 11932 dbgs() << "Trip Count Changed!\n"; 11933 dbgs() << "Old: " << *CurBECount << "\n"; 11934 dbgs() << "New: " << *NewBECount << "\n"; 11935 dbgs() << "Delta: " << *ConstantDelta << "\n"; 11936 std::abort(); 11937 } 11938 } 11939 } 11940 11941 bool ScalarEvolution::invalidate( 11942 Function &F, const PreservedAnalyses &PA, 11943 FunctionAnalysisManager::Invalidator &Inv) { 11944 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 11945 // of its dependencies is invalidated. 

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}
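
// Note: predicates, like SCEV expressions themselves, are uniqued (via
// UniquePreds), so requesting the same predicate twice returns the same
// object and predicates may safely be compared by pointer.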

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }
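
  // The two entry points later in this file exercise the two modes:
  // ScalarEvolution::rewriteUsingPredicate calls
  //   rewrite(S, L, SE, /*NewPreds=*/nullptr, &Preds)
  // to fold the equalities already assumed in \p Pred into \p S, while
  // ScalarEvolution::convertSCEVToAddRecWithPredicates calls
  //   rewrite(S, L, SE, &NewPreds, /*Pred=*/nullptr)
  // to collect the assumptions needed to turn \p S into an AddRecExpr.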

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }
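
  // As a worked example of the rewrites above: for a 32-bit affine AddRec
  // {0,+,1}<%loop> that is not known to be nuw, (zext i32 {0,+,1}<%loop> to
  // i64) is rewritten by visitZeroExtendExpr to the 64-bit AddRec
  // {0,+,1}<%loop> under an added <nusw> wrap predicate, which the caller can
  // later turn into a runtime overflow check instead of giving up on the
  // AddRec form.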

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply
    // the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
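
// Taken together with SCEVWrapPredicate::isAlwaysTrue above: an AddRec that
// is already known nsw yields IncrementNSSW from getImpliedFlags, and
// isAlwaysTrue folds away exactly that flag, so an assumption already proven
// by the static no-wrap flags is recognized as trivially true.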

/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an associated "
                "expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
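
// Each cached entry in RewriteMap is tagged with the generation at which it
// was computed. Bumping the generation after a predicate is added lazily
// invalidates every cached rewrite at once; getSCEV then re-applies the
// current predicate to stale entries on demand.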
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each instruction in each block of the loop.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Matching is not always straightforward, because A
// and B can be folded: if A is X / 2 and B is 4, then A / B becomes X / 8 and
// the division by B no longer appears syntactically.
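// For instance, a urem such as (%a urem 4) is typically represented as
// ((-4 * (%a /u 4)) + %a) after folding; the matcher below recognizes such
// shapes by re-building the urem from each candidate divisor with getURemExpr
// and comparing the result against the original expression, relying on SCEVs
// being uniqued.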
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}