//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));
// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions",
                        cl::Hidden, cl::init(true),
                        cl::desc("When printing analysis, include information on every instruction"));
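
// Usage sketch (illustrative addition, assuming the legacy pass manager's
// analysis printer): the knobs above are ordinary cl::opt flags, so they can
// be exercised from the command line, e.g.
//   opt -disable-output -analyze -scalar-evolution \
//       -scalar-evolution-max-iterations=32 input.ll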

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
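
// Illustrative note (not from the original source): the recognizers below
// match the constant-expression encodings historically produced by
// ConstantExpr::getSizeOf / getAlignOf / getOffsetOf.  For example, sizeof(T)
// is encoded roughly as
//   ptrtoint (T* getelementptr (T, T* null, i32 1) to iN)
// i.e. the byte address of element 1 of a T array based at null.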
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}

/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}

namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case, N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }
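
  // Worked example (illustrative, not from the original source): dividing
  // (8 * %x + 4) by 4 recurses into both addends and yields
  // Quotient = (2 * %x + 1), Remainder = 0.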
  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }
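
  // Worked example (illustrative): for (%x + 2 * %y) divided by 2, the term
  // %x is indivisible (its quotient is 0 and its remainder is %x), so the
  // visitor above produces Quotient = %y and Remainder = %x.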
  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
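  //
  // Worked example (illustrative, not from the original source): for K = 4,
  // K! = 24 = 2^3 * 3, so T = 3 and K!/2^T = 3.  At W = 32, the inverse of 3
  // computed modulo 2^33 and truncated to 32 bits is 0xAAAAAAAB, and indeed
  // 3 * 0xAAAAAAAB == 1 (mod 2^32), so multiplying by it divides exactly by 3.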
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
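
// Worked example (illustrative, not from the original source): the chrec
// {0,+,1,+,1} evaluates at iteration It to
//   0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2 = It*(It+1)/2,
// the closed form of the sum 0 + 1 + ... + It.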

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that the recursive calls above inserted it in the meantime.
    // If we find it there now, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
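
// Worked example (illustrative, not from the original source): by the rules
// above, (trunc i64 (sext i8 %x to i64) to i16) folds to (sext i8 %x to i16)
// because the target is still wider than %x, while
// (trunc i64 (zext i32 %x to i64) to i16) folds to (trunc i32 %x to i16).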

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
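
// Worked example (illustrative, not from the original source): with
// ExtendOpTy = SCEVZeroExtendExpr, WrapType is FlagNUW and the limit is
// 0 - umax(Step); e.g. for an i8 step whose unsigned range is [1, 4], the
// recurrence cannot wrap while its value is ult 252 before incrementing.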

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
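
// Worked example (illustrative, not from the original source): for
// AR = {%x + 4,+,4}, PreStart is %x; if {%x,+,4} is already known <nuw> and
// the backedge is taken at least once, then zext(%x + 4) == zext(%x) + 4, so
// the extended start can be built without widening the addition.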
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}
such that the top 1608 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1609 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1610 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1611 // the (C + x + y + ...) expression is \p WholeAddExpr. 1612 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1613 const SCEVConstant *ConstantTerm, 1614 const SCEVAddExpr *WholeAddExpr) { 1615 const APInt C = ConstantTerm->getAPInt(); 1616 const unsigned BitWidth = C.getBitWidth(); 1617 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1618 uint32_t TZ = BitWidth; 1619 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1620 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1621 if (TZ) { 1622 // Set D to be as many least significant bits of C as possible while still 1623 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1624 return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C; 1625 } 1626 return APInt(BitWidth, 0); 1627 } 1628 1629 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1630 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1631 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1632 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1633 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1634 const APInt &ConstantStart, 1635 const SCEV *Step) { 1636 const unsigned BitWidth = ConstantStart.getBitWidth(); 1637 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1638 if (TZ) 1639 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1640 : ConstantStart; 1641 return APInt(BitWidth, 0); 1642 } 1643 1644 const SCEV * 1645 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1646 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1647 "This is not an extending conversion!"); 1648 assert(isSCEVable(Ty) && 1649 "This is not a conversion to a SCEVable type!"); 1650 Ty = getEffectiveSCEVType(Ty); 1651 1652 // Fold if the operand is constant. 1653 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1654 return getConstant( 1655 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1656 1657 // zext(zext(x)) --> zext(x) 1658 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1659 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1660 1661 // Before doing any expensive analysis, check to see if we've already 1662 // computed a SCEV for this Op and Ty. 1663 FoldingSetNodeID ID; 1664 ID.AddInteger(scZeroExtend); 1665 ID.AddPointer(Op); 1666 ID.AddPointer(Ty); 1667 void *IP = nullptr; 1668 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1669 if (Depth > MaxCastDepth) { 1670 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1671 Op, Ty); 1672 UniqueSCEVs.InsertNode(S, IP); 1673 addToLoopUseLists(S); 1674 return S; 1675 } 1676 1677 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1678 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1679 // It's possible the bits taken off by the truncate were all zero bits. If 1680 // so, we should be able to simplify this further. 
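// For example (illustrative values): if X is an i32 whose unsigned range is
// known to be [0, 200), then zext(trunc(X to i8) to i64) == zext(X to i64),
// because the truncation to i8 discards only zero bits; the call below then
// folds the trunc/zext round-trip away.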
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
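            // (Illustrative, with hypothetical i8 operands: for AR = {0,+,2}
            // and MaxBECount = 100, ZAdd = zext(i8 200) = 200 equals the
            // widened sum 0 + 100*2 = 200, so the recurrence provably stays
            // within i8's unsigned range; with Step = 3 the widened sum would
            // be 300 but ZAdd would be zext(i8 44) = 44, and no conclusion
            // could be drawn.)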
1752 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1753 // Return the expression with the addrec on the outside. 1754 return getAddRecExpr( 1755 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1756 Depth + 1), 1757 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1758 AR->getNoWrapFlags()); 1759 } 1760 // Similar to above, only this time treat the step value as signed. 1761 // This covers loops that count down. 1762 OperandExtendedAdd = 1763 getAddExpr(WideStart, 1764 getMulExpr(WideMaxBECount, 1765 getSignExtendExpr(Step, WideTy, Depth + 1), 1766 SCEV::FlagAnyWrap, Depth + 1), 1767 SCEV::FlagAnyWrap, Depth + 1); 1768 if (ZAdd == OperandExtendedAdd) { 1769 // Cache knowledge of AR NW, which is propagated to this AddRec. 1770 // Negative step causes unsigned wrap, but it still can't self-wrap. 1771 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1772 // Return the expression with the addrec on the outside. 1773 return getAddRecExpr( 1774 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1775 Depth + 1), 1776 getSignExtendExpr(Step, Ty, Depth + 1), L, 1777 AR->getNoWrapFlags()); 1778 } 1779 } 1780 } 1781 1782 // Normally, in the cases we can prove no-overflow via a 1783 // backedge guarding condition, we can also compute a backedge 1784 // taken count for the loop. The exceptions are assumptions and 1785 // guards present in the loop -- SCEV is not great at exploiting 1786 // these to compute max backedge taken counts, but can still use 1787 // these to prove lack of overflow. Use this fact to avoid 1788 // doing extra work that may not pay off. 1789 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1790 !AC.assumptions().empty()) { 1791 // If the backedge is guarded by a comparison with the pre-inc 1792 // value the addrec is safe. Also, if the entry is guarded by 1793 // a comparison with the start value and the backedge is 1794 // guarded by a comparison with the post-inc value, the addrec 1795 // is safe. 1796 if (isKnownPositive(Step)) { 1797 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1798 getUnsignedRangeMax(Step)); 1799 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1800 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 1801 // Cache knowledge of AR NUW, which is propagated to this 1802 // AddRec. 1803 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW); 1804 // Return the expression with the addrec on the outside. 1805 return getAddRecExpr( 1806 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1807 Depth + 1), 1808 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1809 AR->getNoWrapFlags()); 1810 } 1811 } else if (isKnownNegative(Step)) { 1812 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1813 getSignedRangeMin(Step)); 1814 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1815 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1816 // Cache knowledge of AR NW, which is propagated to this 1817 // AddRec. Negative step causes unsigned wrap, but it 1818 // still can't self-wrap. 1819 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW); 1820 // Return the expression with the addrec on the outside. 
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Address arithmetic often contains expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount; see the LoadStoreVectorizer
    // pass.
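    // A worked instance (illustrative): for zext(5 + 4*x), the variable part
    // 4*x has at least two trailing zero bits, so D = 5 & 3 = 1 and the
    // expression splits into 1 + zext(4 + 4*x); adding 1 to a multiple of 4
    // cannot carry out of the low two bits, so the outer add is <nuw><nsw>.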
1888 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1889 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1890 if (D != 0) { 1891 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1892 const SCEV *SResidual = 1893 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1894 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1895 return getAddExpr(SZExtD, SZExtR, 1896 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1897 Depth + 1); 1898 } 1899 } 1900 } 1901 1902 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1903 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1904 if (SM->hasNoUnsignedWrap()) { 1905 // If the multiply does not unsign overflow then we can, by definition, 1906 // commute the zero extension with the multiply operation. 1907 SmallVector<const SCEV *, 4> Ops; 1908 for (const auto *Op : SM->operands()) 1909 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1910 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1911 } 1912 1913 // zext(2^K * (trunc X to iN)) to iM -> 1914 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1915 // 1916 // Proof: 1917 // 1918 // zext(2^K * (trunc X to iN)) to iM 1919 // = zext((trunc X to iN) << K) to iM 1920 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1921 // (because shl removes the top K bits) 1922 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1923 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1924 // 1925 if (SM->getNumOperands() == 2) 1926 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1927 if (MulLHS->getAPInt().isPowerOf2()) 1928 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1929 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1930 MulLHS->getAPInt().logBase2(); 1931 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1932 return getMulExpr( 1933 getZeroExtendExpr(MulLHS, Ty), 1934 getZeroExtendExpr( 1935 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1936 SCEV::FlagNUW, Depth + 1); 1937 } 1938 } 1939 1940 // The cast wasn't folded; create an explicit cast node. 1941 // Recompute the insert position, as it may have been invalidated. 1942 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1943 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1944 Op, Ty); 1945 UniqueSCEVs.InsertNode(S, IP); 1946 addToLoopUseLists(S); 1947 return S; 1948 } 1949 1950 const SCEV * 1951 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1952 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1953 "This is not an extending conversion!"); 1954 assert(isSCEVable(Ty) && 1955 "This is not a conversion to a SCEVable type!"); 1956 Ty = getEffectiveSCEVType(Ty); 1957 1958 // Fold if the operand is constant. 1959 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1960 return getConstant( 1961 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1962 1963 // sext(sext(x)) --> sext(x) 1964 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1965 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1966 1967 // sext(zext(x)) --> zext(x) 1968 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1969 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1970 1971 // Before doing any expensive analysis, check to see if we've already 1972 // computed a SCEV for this Op and Ty. 
1973 FoldingSetNodeID ID; 1974 ID.AddInteger(scSignExtend); 1975 ID.AddPointer(Op); 1976 ID.AddPointer(Ty); 1977 void *IP = nullptr; 1978 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1979 // Limit recursion depth. 1980 if (Depth > MaxCastDepth) { 1981 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1982 Op, Ty); 1983 UniqueSCEVs.InsertNode(S, IP); 1984 addToLoopUseLists(S); 1985 return S; 1986 } 1987 1988 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1989 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1990 // It's possible the bits taken off by the truncate were all sign bits. If 1991 // so, we should be able to simplify this further. 1992 const SCEV *X = ST->getOperand(); 1993 ConstantRange CR = getSignedRange(X); 1994 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1995 unsigned NewBits = getTypeSizeInBits(Ty); 1996 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1997 CR.sextOrTrunc(NewBits))) 1998 return getTruncateOrSignExtend(X, Ty, Depth); 1999 } 2000 2001 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 2002 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 2003 if (SA->hasNoSignedWrap()) { 2004 // If the addition does not sign overflow then we can, by definition, 2005 // commute the sign extension with the addition operation. 2006 SmallVector<const SCEV *, 4> Ops; 2007 for (const auto *Op : SA->operands()) 2008 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 2009 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 2010 } 2011 2012 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 2013 // if D + (C - D + x + y + ...) could be proven to not signed wrap 2014 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 2015 // 2016 // For instance, this will bring two seemingly different expressions: 2017 // 1 + sext(5 + 20 * %x + 24 * %y) and 2018 // sext(6 + 20 * %x + 24 * %y) 2019 // to the same form: 2020 // 2 + sext(4 + 20 * %x + 24 * %y) 2021 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 2022 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 2023 if (D != 0) { 2024 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2025 const SCEV *SResidual = 2026 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 2027 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2028 return getAddExpr(SSExtD, SSExtR, 2029 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2030 Depth + 1); 2031 } 2032 } 2033 } 2034 // If the input value is a chrec scev, and we can prove that the value 2035 // did not overflow the old, smaller, value, we can sign extend all of the 2036 // operands (often constants). This allows analysis of something like 2037 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 2038 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 2039 if (AR->isAffine()) { 2040 const SCEV *Start = AR->getStart(); 2041 const SCEV *Step = AR->getStepRecurrence(*this); 2042 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 2043 const Loop *L = AR->getLoop(); 2044 2045 if (!AR->hasNoSignedWrap()) { 2046 auto NewFlags = proveNoWrapViaConstantRanges(AR); 2047 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags); 2048 } 2049 2050 // If we have special knowledge that this addrec won't overflow, 2051 // we don't need to do any further analysis. 
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
              getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getSignExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
              getAddExpr(WideStart,
                         getMulExpr(WideMaxBECount,
                                    getZeroExtendExpr(Step, WideTy, Depth + 1),
                                    SCEV::FlagAnyWrap, Depth + 1),
                         SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            //        => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop. The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow. Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably non-negative and we could not simplify
  // away the sext, build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
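/// For instance (illustrative): anyext(i8 -1 to i32) takes the sext path and
/// yields -1, while an i8 addrec whose zext folds (say {0,+,1}<nuw>) comes
/// back in its folded zext form; only when neither cast folds do we settle on
/// an arbitrary (zext) representation.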
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, and update the given map. This is a helper function for
/// getAddExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
2288 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2289 Interesting = true; 2290 AccumulatedConstant += Scale * C->getAPInt(); 2291 } 2292 2293 // Next comes everything else. We're especially interested in multiplies 2294 // here, but they're in the middle, so just visit the rest with one loop. 2295 for (; i != NumOperands; ++i) { 2296 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2297 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2298 APInt NewScale = 2299 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2300 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2301 // A multiplication of a constant with another add; recurse. 2302 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2303 Interesting |= 2304 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2305 Add->op_begin(), Add->getNumOperands(), 2306 NewScale, SE); 2307 } else { 2308 // A multiplication of a constant with some other value. Update 2309 // the map. 2310 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 2311 const SCEV *Key = SE.getMulExpr(MulOps); 2312 auto Pair = M.insert({Key, NewScale}); 2313 if (Pair.second) { 2314 NewOps.push_back(Pair.first->first); 2315 } else { 2316 Pair.first->second += NewScale; 2317 // The map already had an entry for this value, which may indicate 2318 // a folding opportunity. 2319 Interesting = true; 2320 } 2321 } 2322 } else { 2323 // An ordinary operand. Update the map. 2324 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2325 M.insert({Ops[i], Scale}); 2326 if (Pair.second) { 2327 NewOps.push_back(Pair.first->first); 2328 } else { 2329 Pair.first->second += Scale; 2330 // The map already had an entry for this value, which may indicate 2331 // a folding opportunity. 2332 Interesting = true; 2333 } 2334 } 2335 } 2336 2337 return Interesting; 2338 } 2339 2340 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2341 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2342 // can't-overflow flags for the operation if possible. 2343 static SCEV::NoWrapFlags 2344 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2345 const ArrayRef<const SCEV *> Ops, 2346 SCEV::NoWrapFlags Flags) { 2347 using namespace std::placeholders; 2348 2349 using OBO = OverflowingBinaryOperator; 2350 2351 bool CanAnalyze = 2352 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2353 (void)CanAnalyze; 2354 assert(CanAnalyze && "don't call from other places!"); 2355 2356 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2357 SCEV::NoWrapFlags SignOrUnsignWrap = 2358 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2359 2360 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
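// (Why this inference is sound: if every operand is non-negative and the
// signed operation does not wrap, each partial result lies in [0, SINT_MAX],
// and values in that range cannot wrap the unsigned domain either.)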
2361 auto IsKnownNonNegative = [&](const SCEV *S) { 2362 return SE->isKnownNonNegative(S); 2363 }; 2364 2365 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2366 Flags = 2367 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2368 2369 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2370 2371 if (SignOrUnsignWrap != SignOrUnsignMask && 2372 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2373 isa<SCEVConstant>(Ops[0])) { 2374 2375 auto Opcode = [&] { 2376 switch (Type) { 2377 case scAddExpr: 2378 return Instruction::Add; 2379 case scMulExpr: 2380 return Instruction::Mul; 2381 default: 2382 llvm_unreachable("Unexpected SCEV op."); 2383 } 2384 }(); 2385 2386 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2387 2388 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2389 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2390 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2391 Opcode, C, OBO::NoSignedWrap); 2392 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2393 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2394 } 2395 2396 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2397 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2398 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2399 Opcode, C, OBO::NoUnsignedWrap); 2400 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2401 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2402 } 2403 } 2404 2405 return Flags; 2406 } 2407 2408 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2409 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2410 } 2411 2412 /// Get a canonical add expression, or something simpler if possible. 2413 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2414 SCEV::NoWrapFlags Flags, 2415 unsigned Depth) { 2416 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2417 "only nuw or nsw allowed"); 2418 assert(!Ops.empty() && "Cannot get empty add!"); 2419 if (Ops.size() == 1) return Ops[0]; 2420 #ifndef NDEBUG 2421 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2422 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2423 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2424 "SCEVAddExpr operand types don't match!"); 2425 #endif 2426 2427 // Sort by complexity, this groups all similar expression types together. 2428 GroupByComplexity(Ops, &LI, DT); 2429 2430 Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); 2431 2432 // If there are any constants, fold them together. 2433 unsigned Idx = 0; 2434 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2435 ++Idx; 2436 assert(Idx < Ops.size()); 2437 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2438 // We found two constants, fold them together! 2439 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2440 if (Ops.size() == 2) return Ops[0]; 2441 Ops.erase(Ops.begin()+1); // Erase the folded element 2442 LHSC = cast<SCEVConstant>(Ops[0]); 2443 } 2444 2445 // If we are left with a constant zero being added, strip it off. 2446 if (LHSC->getValue()->isZero()) { 2447 Ops.erase(Ops.begin()); 2448 --Idx; 2449 } 2450 2451 if (Ops.size() == 1) return Ops[0]; 2452 } 2453 2454 // Limit recursion calls depth. 
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted. Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply. If so, merge it into
  // the multiply.
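  // In its simplest shape (illustrative): X + X*Y becomes X*(Y + 1); with
  // extra addends, W + X + X*Y*Z becomes W + X*(Y*Z + 1), as the loop below
  // spells out.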
2619 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2620 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2621 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2622 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2623 if (isa<SCEVConstant>(MulOpSCEV)) 2624 continue; 2625 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2626 if (MulOpSCEV == Ops[AddOp]) { 2627 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2628 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2629 if (Mul->getNumOperands() != 2) { 2630 // If the multiply has more than two operands, we must get the 2631 // Y*Z term. 2632 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2633 Mul->op_begin()+MulOp); 2634 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2635 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2636 } 2637 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2638 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2639 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2640 SCEV::FlagAnyWrap, Depth + 1); 2641 if (Ops.size() == 2) return OuterMul; 2642 if (AddOp < Idx) { 2643 Ops.erase(Ops.begin()+AddOp); 2644 Ops.erase(Ops.begin()+Idx-1); 2645 } else { 2646 Ops.erase(Ops.begin()+Idx); 2647 Ops.erase(Ops.begin()+AddOp-1); 2648 } 2649 Ops.push_back(OuterMul); 2650 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2651 } 2652 2653 // Check this multiply against other multiplies being added together. 2654 for (unsigned OtherMulIdx = Idx+1; 2655 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2656 ++OtherMulIdx) { 2657 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2658 // If MulOp occurs in OtherMul, we can fold the two multiplies 2659 // together. 2660 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2661 OMulOp != e; ++OMulOp) 2662 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2663 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2664 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2665 if (Mul->getNumOperands() != 2) { 2666 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2667 Mul->op_begin()+MulOp); 2668 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2669 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2670 } 2671 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2672 if (OtherMul->getNumOperands() != 2) { 2673 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2674 OtherMul->op_begin()+OMulOp); 2675 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2676 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2677 } 2678 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2679 const SCEV *InnerMulSum = 2680 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2681 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2682 SCEV::FlagAnyWrap, Depth + 1); 2683 if (Ops.size() == 2) return OuterMul; 2684 Ops.erase(Ops.begin()+Idx); 2685 Ops.erase(Ops.begin()+OtherMulIdx-1); 2686 Ops.push_back(OuterMul); 2687 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2688 } 2689 } 2690 } 2691 } 2692 2693 // If there are any add recurrences in the operands list, see if any other 2694 // added values are loop invariant. If so, we can fold them into the 2695 // recurrence. 2696 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2697 ++Idx; 2698 2699 // Scan over all recurrences, trying to fold loop invariants into them. 
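  // For example (illustrative): if x is loop-invariant in L, then
  // x + {1,+,2}<L> folds to {x+1,+,2}<L>; the invariant addend merely shifts
  // the recurrence's start value.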
2700 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2701 // Scan all of the other operands to this add and add them to the vector if 2702 // they are loop invariant w.r.t. the recurrence. 2703 SmallVector<const SCEV *, 8> LIOps; 2704 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2705 const Loop *AddRecLoop = AddRec->getLoop(); 2706 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2707 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2708 LIOps.push_back(Ops[i]); 2709 Ops.erase(Ops.begin()+i); 2710 --i; --e; 2711 } 2712 2713 // If we found some loop invariants, fold them into the recurrence. 2714 if (!LIOps.empty()) { 2715 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2716 LIOps.push_back(AddRec->getStart()); 2717 2718 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2719 AddRec->op_end()); 2720 // This follows from the fact that the no-wrap flags on the outer add 2721 // expression are applicable on the 0th iteration, when the add recurrence 2722 // will be equal to its start value. 2723 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2724 2725 // Build the new addrec. Propagate the NUW and NSW flags if both the 2726 // outer add and the inner addrec are guaranteed to have no overflow. 2727 // Always propagate NW. 2728 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2729 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2730 2731 // If all of the other operands were loop invariant, we are done. 2732 if (Ops.size() == 1) return NewRec; 2733 2734 // Otherwise, add the folded AddRec by the non-invariant parts. 2735 for (unsigned i = 0;; ++i) 2736 if (Ops[i] == AddRec) { 2737 Ops[i] = NewRec; 2738 break; 2739 } 2740 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2741 } 2742 2743 // Okay, if there weren't any loop invariants to be folded, check to see if 2744 // there are multiple AddRec's with the same loop induction variable being 2745 // added together. If so, we can fold them. 2746 for (unsigned OtherIdx = Idx+1; 2747 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2748 ++OtherIdx) { 2749 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2750 // so that the 1st found AddRecExpr is dominated by all others. 2751 assert(DT.dominates( 2752 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2753 AddRec->getLoop()->getHeader()) && 2754 "AddRecExprs are not sorted in reverse dominance order?"); 2755 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2756 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2757 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 2758 AddRec->op_end()); 2759 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2760 ++OtherIdx) { 2761 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2762 if (OtherAddRec->getLoop() == AddRecLoop) { 2763 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2764 i != e; ++i) { 2765 if (i >= AddRecOps.size()) { 2766 AddRecOps.append(OtherAddRec->op_begin()+i, 2767 OtherAddRec->op_end()); 2768 break; 2769 } 2770 SmallVector<const SCEV *, 2> TwoOps = { 2771 AddRecOps[i], OtherAddRec->getOperand(i)}; 2772 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2773 } 2774 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2775 } 2776 } 2777 // Step size has changed, so we cannot guarantee no self-wraparound. 
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration i, we multiply by the i-th factor of the numerator,
  // n-(i-1), and divide by i, the i-th factor of the denominator taken in
  // increasing order. This division will always produce an integral result,
  // and helps reduce the chance of overflow in the
  // intermediate computations.
However, we can still overflow even when the 2875 // final result would fit. 2876 2877 if (n == 0 || n == k) return 1; 2878 if (k > n) return 0; 2879 2880 if (k > n/2) 2881 k = n-k; 2882 2883 uint64_t r = 1; 2884 for (uint64_t i = 1; i <= k; ++i) { 2885 r = umul_ov(r, n-(i-1), Overflow); 2886 r /= i; 2887 } 2888 return r; 2889 } 2890 2891 /// Determine if any of the operands in this SCEV are a constant or if 2892 /// any of the add or multiply expressions in this SCEV contain a constant. 2893 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2894 struct FindConstantInAddMulChain { 2895 bool FoundConstant = false; 2896 2897 bool follow(const SCEV *S) { 2898 FoundConstant |= isa<SCEVConstant>(S); 2899 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2900 } 2901 2902 bool isDone() const { 2903 return FoundConstant; 2904 } 2905 }; 2906 2907 FindConstantInAddMulChain F; 2908 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2909 ST.visitAll(StartExpr); 2910 return F.FoundConstant; 2911 } 2912 2913 /// Get a canonical multiply expression, or something simpler if possible. 2914 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2915 SCEV::NoWrapFlags Flags, 2916 unsigned Depth) { 2917 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) && 2918 "only nuw or nsw allowed"); 2919 assert(!Ops.empty() && "Cannot get empty mul!"); 2920 if (Ops.size() == 1) return Ops[0]; 2921 #ifndef NDEBUG 2922 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2923 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2924 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2925 "SCEVMulExpr operand types don't match!"); 2926 #endif 2927 2928 // Sort by complexity, this groups all similar expression types together. 2929 GroupByComplexity(Ops, &LI, DT); 2930 2931 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); 2932 2933 // Limit recursion calls depth. 2934 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2935 return getOrCreateMulExpr(Ops, Flags); 2936 2937 // If there are any constants, fold them together. 2938 unsigned Idx = 0; 2939 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2940 2941 if (Ops.size() == 2) 2942 // C1*(C2+V) -> C1*C2 + C1*V 2943 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2944 // If any of Add's ops are Adds or Muls with a constant, apply this 2945 // transformation as well. 2946 // 2947 // TODO: There are some cases where this transformation is not 2948 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2949 // this transformation should be narrowed down. 2950 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2951 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2952 SCEV::FlagAnyWrap, Depth + 1), 2953 getMulExpr(LHSC, Add->getOperand(1), 2954 SCEV::FlagAnyWrap, Depth + 1), 2955 SCEV::FlagAnyWrap, Depth + 1); 2956 2957 ++Idx; 2958 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2959 // We found two constants, fold them together! 2960 ConstantInt *Fold = 2961 ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); 2962 Ops[0] = getConstant(Fold); 2963 Ops.erase(Ops.begin()+1); // Erase the folded element 2964 if (Ops.size() == 1) return Ops[0]; 2965 LHSC = cast<SCEVConstant>(Ops[0]); 2966 } 2967 2968 // If we are left with a constant one being multiplied, strip it off. 
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands, inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // multiplied values are loop invariant. If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
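    // For example (illustrative): with x loop-invariant in L, x * {2,+,4}<L>
    // folds to {2*x,+,4*x}<L>; unlike addition, which only shifts the start,
    // multiplication scales every operand of the recurrence.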
3052 if (!LIOps.empty()) { 3053 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3054 SmallVector<const SCEV *, 4> NewOps; 3055 NewOps.reserve(AddRec->getNumOperands()); 3056 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3057 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3058 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3059 SCEV::FlagAnyWrap, Depth + 1)); 3060 3061 // Build the new addrec. Propagate the NUW and NSW flags if both the 3062 // outer mul and the inner addrec are guaranteed to have no overflow. 3063 // 3064 // No self-wrap cannot be guaranteed after changing the step size, but 3065 // will be inferred if either NUW or NSW is true. 3066 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); 3067 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); 3068 3069 // If all of the other operands were loop invariant, we are done. 3070 if (Ops.size() == 1) return NewRec; 3071 3072 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3073 for (unsigned i = 0;; ++i) 3074 if (Ops[i] == AddRec) { 3075 Ops[i] = NewRec; 3076 break; 3077 } 3078 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3079 } 3080 3081 // Okay, if there weren't any loop invariants to be folded, check to see 3082 // if there are multiple AddRec's with the same loop induction variable 3083 // being multiplied together. If so, we can fold them. 3084 3085 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3086 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3087 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3088 // ]]],+,...up to x=2n}. 3089 // Note that the arguments to choose() are always integers with values 3090 // known at compile time, never SCEV objects. 3091 // 3092 // The implementation avoids pointless extra computations when the two 3093 // addrec's are of different length (mathematically, it's equivalent to 3094 // an infinite stream of zeros on the right). 3095 bool OpsModified = false; 3096 for (unsigned OtherIdx = Idx+1; 3097 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3098 ++OtherIdx) { 3099 const SCEVAddRecExpr *OtherAddRec = 3100 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3101 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3102 continue; 3103 3104 // Limit max number of arguments to avoid creation of unreasonably big 3105 // SCEVAddRecs with very complex operands. 
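      // (Illustrative instance of the formula above: {1,+,1}<L> * {1,+,1}<L>
      // = {1,+,3,+,2}<L>, i.e. (n+1)^2 = n^2 + 2n + 1 written as a chain of
      // recurrences.)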
3106       if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3107           MaxAddRecSize || isHugeExpression(AddRec) ||
3108           isHugeExpression(OtherAddRec))
3109         continue;
3110 
3111       bool Overflow = false;
3112       Type *Ty = AddRec->getType();
3113       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3114       SmallVector<const SCEV*, 7> AddRecOps;
3115       for (int x = 0, xe = AddRec->getNumOperands() +
3116            OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3117         SmallVector<const SCEV *, 7> SumOps;
3118         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3119           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3120           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3121                ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3122                z < ze && !Overflow; ++z) {
3123             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3124             uint64_t Coeff;
3125             if (LargerThan64Bits)
3126               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3127             else
3128               Coeff = Coeff1*Coeff2;
3129             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3130             const SCEV *Term1 = AddRec->getOperand(y-z);
3131             const SCEV *Term2 = OtherAddRec->getOperand(z);
3132             SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3133                                         SCEV::FlagAnyWrap, Depth + 1));
3134           }
3135         }
3136         if (SumOps.empty())
3137           SumOps.push_back(getZero(Ty));
3138         AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3139       }
3140       if (!Overflow) {
3141         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3142                                               SCEV::FlagAnyWrap);
3143         if (Ops.size() == 2) return NewAddRec;
3144         Ops[Idx] = NewAddRec;
3145         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3146         OpsModified = true;
3147         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3148         if (!AddRec)
3149           break;
3150       }
3151     }
3152     if (OpsModified)
3153       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3154 
3155     // Otherwise couldn't fold anything into this recurrence. Move on to the
3156     // next one.
3157   }
3158 
3159   // Okay, it looks like we really DO need a mul expr. Check to see if we
3160   // already have one, otherwise create a new one.
3161   return getOrCreateMulExpr(Ops, Flags);
3162 }
3163 
3164 /// Represents an unsigned remainder expression based on unsigned division.
3165 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3166                                          const SCEV *RHS) {
3167   assert(getEffectiveSCEVType(LHS->getType()) ==
3168          getEffectiveSCEVType(RHS->getType()) &&
3169          "SCEVURemExpr operand types don't match!");
3170 
3171   // Short-circuit easy cases.
3172   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3173     // If constant is one, the result is trivial.
3174     if (RHSC->getValue()->isOne())
3175       return getZero(LHS->getType()); // X urem 1 --> 0
3176 
3177     // If constant is a power of two, fold into a zext(trunc(LHS)).
3178     if (RHSC->getAPInt().isPowerOf2()) {
3179       Type *FullTy = LHS->getType();
3180       Type *TruncTy =
3181           IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3182       return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3183     }
3184   }
3185 
3186   // Fall back to the general case: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3187   const SCEV *UDiv = getUDivExpr(LHS, RHS);
3188   const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3189   return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3190 }
3191 
3192 /// Get a canonical unsigned division expression, or something simpler if
3193 /// possible.
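/// For example (illustrative operands): {8,+,4}<L> udiv 4 can fold to
/// {2,+,1}<L>, and (4 * %x) udiv 2 to 2 * %x, when the zero-extension
/// safety checks below succeed.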
3194 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3195                                          const SCEV *RHS) {
3196   assert(getEffectiveSCEVType(LHS->getType()) ==
3197          getEffectiveSCEVType(RHS->getType()) &&
3198          "SCEVUDivExpr operand types don't match!");
3199 
3200   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3201     if (RHSC->getValue()->isOne())
3202       return LHS; // X udiv 1 --> X
3203     // If the denominator is zero, the result of the udiv is undefined. Don't
3204     // try to analyze it, because the resolution chosen here may differ from
3205     // the resolution chosen in other parts of the compiler.
3206     if (!RHSC->getValue()->isZero()) {
3207       // Determine if the division can be folded into the operands of
3208       // its operands (e.g. an addrec's start and step).
3209       // TODO: Generalize this to non-constants by using known-bits information.
3210       Type *Ty = LHS->getType();
3211       unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3212       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3213       // For non-power-of-two values, effectively round the value up to the
3214       // nearest power of two.
3215       if (!RHSC->getAPInt().isPowerOf2())
3216         ++MaxShiftAmt;
3217       IntegerType *ExtTy =
3218           IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3219       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3220         if (const SCEVConstant *Step =
3221                 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3222           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3223           const APInt &StepInt = Step->getAPInt();
3224           const APInt &DivInt = RHSC->getAPInt();
3225           if (!StepInt.urem(DivInt) &&
3226               getZeroExtendExpr(AR, ExtTy) ==
3227               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3228                             getZeroExtendExpr(Step, ExtTy),
3229                             AR->getLoop(), SCEV::FlagAnyWrap)) {
3230             SmallVector<const SCEV *, 4> Operands;
3231             for (const SCEV *Op : AR->operands())
3232               Operands.push_back(getUDivExpr(Op, RHS));
3233             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3234           }
3235           // Get a canonical UDivExpr for a recurrence.
3236           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3237           // We can currently only fold X%N if X is constant.
3238           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3239           if (StartC && !DivInt.urem(StepInt) &&
3240               getZeroExtendExpr(AR, ExtTy) ==
3241               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3242                             getZeroExtendExpr(Step, ExtTy),
3243                             AR->getLoop(), SCEV::FlagAnyWrap)) {
3244             const APInt &StartInt = StartC->getAPInt();
3245             const APInt &StartRem = StartInt.urem(StepInt);
3246             if (StartRem != 0)
3247               LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
3248                                   AR->getLoop(), SCEV::FlagNW);
3249           }
3250         }
3251       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3252       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3253         SmallVector<const SCEV *, 4> Operands;
3254         for (const SCEV *Op : M->operands())
3255           Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3256         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3257           // Find an operand that's safely divisible.
3258           for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3259             const SCEV *Op = M->getOperand(i);
3260             const SCEV *Div = getUDivExpr(Op, RHSC);
3261             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3262               Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
3263                                                       M->op_end());
3264               Operands[i] = Div;
3265               return getMulExpr(Operands);
3266             }
3267           }
3268       }
3269 
3270       // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
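      // For example (illustrative operands): (%x udiv 4) udiv 8 becomes
      // %x udiv 32. If B*C overflows the bit width, then A udiv (B*C)
      // is zero, which is returned directly below.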
3271 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3272 if (auto *DivisorConstant = 3273 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3274 bool Overflow = false; 3275 APInt NewRHS = 3276 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3277 if (Overflow) { 3278 return getConstant(RHSC->getType(), 0, false); 3279 } 3280 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3281 } 3282 } 3283 3284 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3285 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3286 SmallVector<const SCEV *, 4> Operands; 3287 for (const SCEV *Op : A->operands()) 3288 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3289 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3290 Operands.clear(); 3291 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3292 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3293 if (isa<SCEVUDivExpr>(Op) || 3294 getMulExpr(Op, RHS) != A->getOperand(i)) 3295 break; 3296 Operands.push_back(Op); 3297 } 3298 if (Operands.size() == A->getNumOperands()) 3299 return getAddExpr(Operands); 3300 } 3301 } 3302 3303 // Fold if both operands are constant. 3304 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3305 Constant *LHSCV = LHSC->getValue(); 3306 Constant *RHSCV = RHSC->getValue(); 3307 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3308 RHSCV))); 3309 } 3310 } 3311 } 3312 3313 FoldingSetNodeID ID; 3314 ID.AddInteger(scUDivExpr); 3315 ID.AddPointer(LHS); 3316 ID.AddPointer(RHS); 3317 void *IP = nullptr; 3318 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3319 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3320 LHS, RHS); 3321 UniqueSCEVs.InsertNode(S, IP); 3322 addToLoopUseLists(S); 3323 return S; 3324 } 3325 3326 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3327 APInt A = C1->getAPInt().abs(); 3328 APInt B = C2->getAPInt().abs(); 3329 uint32_t ABW = A.getBitWidth(); 3330 uint32_t BBW = B.getBitWidth(); 3331 3332 if (ABW > BBW) 3333 B = B.zext(ABW); 3334 else if (ABW < BBW) 3335 A = A.zext(BBW); 3336 3337 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3338 } 3339 3340 /// Get a canonical unsigned division expression, or something simpler if 3341 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3342 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3343 /// it's not exact because the udiv may be clearing bits. 3344 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3345 const SCEV *RHS) { 3346 // TODO: we could try to find factors in all sorts of things, but for now we 3347 // just deal with u/exact (multiply, constant). See SCEVDivision towards the 3348 // end of this file for inspiration. 3349 3350 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3351 if (!Mul || !Mul->hasNoUnsignedWrap()) 3352 return getUDivExpr(LHS, RHS); 3353 3354 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3355 // If the mulexpr multiplies by a constant, then that constant must be the 3356 // first element of the mulexpr. 
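    // (getMulExpr sorts constants to the front, so checking operand 0 is
    // sufficient.) For example (illustrative operands): (6 * %x)<nuw> /u 4
    // has gcd(6, 4) == 2, so it is rewritten below as (3 * %x) /u 2 before
    // retrying the fold.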
3357     if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3358       if (LHSCst == RHSCst) {
3359         SmallVector<const SCEV *, 2> Operands;
3360         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3361         return getMulExpr(Operands);
3362       }
3363 
3364       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3365       // that there's a factor provided by one of the other terms. We need to
3366       // check.
3367       APInt Factor = gcd(LHSCst, RHSCst);
3368       if (!Factor.isIntN(1)) {
3369         LHSCst =
3370             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3371         RHSCst =
3372             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3373         SmallVector<const SCEV *, 2> Operands;
3374         Operands.push_back(LHSCst);
3375         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3376         LHS = getMulExpr(Operands);
3377         RHS = RHSCst;
3378         Mul = dyn_cast<SCEVMulExpr>(LHS);
3379         if (!Mul)
3380           return getUDivExactExpr(LHS, RHS);
3381       }
3382     }
3383   }
3384 
3385   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3386     if (Mul->getOperand(i) == RHS) {
3387       SmallVector<const SCEV *, 2> Operands;
3388       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3389       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3390       return getMulExpr(Operands);
3391     }
3392   }
3393 
3394   return getUDivExpr(LHS, RHS);
3395 }
3396 
3397 /// Get an add recurrence expression for the specified loop. Simplify the
3398 /// expression as much as possible.
3399 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3400                                            const Loop *L,
3401                                            SCEV::NoWrapFlags Flags) {
3402   SmallVector<const SCEV *, 4> Operands;
3403   Operands.push_back(Start);
3404   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3405     if (StepChrec->getLoop() == L) {
3406       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3407       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3408     }
3409 
3410   Operands.push_back(Step);
3411   return getAddRecExpr(Operands, L, Flags);
3412 }
3413 
3414 /// Get an add recurrence expression for the specified loop. Simplify the
3415 /// expression as much as possible.
3416 const SCEV *
3417 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3418                                const Loop *L, SCEV::NoWrapFlags Flags) {
3419   if (Operands.size() == 1) return Operands[0];
3420 #ifndef NDEBUG
3421   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3422   for (unsigned i = 1, e = Operands.size(); i != e; ++i)
3423     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3424            "SCEVAddRecExpr operand types don't match!");
3425   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3426     assert(isLoopInvariant(Operands[i], L) &&
3427            "SCEVAddRecExpr operand is not loop-invariant!");
3428 #endif
3429 
3430   if (Operands.back()->isZero()) {
3431     Operands.pop_back();
3432     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3433   }
3434 
3435   // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3436   // use that information to infer NUW and NSW flags. However, computing a
3437   // BE count requires calling getAddRecExpr, so we may not yet have a
3438   // meaningful BE count at this point (and if we don't, we'd be stuck
3439   // with a SCEVCouldNotCompute as the cached BE count).
3440 
3441   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3442 
3443   // Canonicalize nested AddRecs by nesting them in order of loop depth.
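  // For example (illustrative loops, with L2 an outer loop containing L1):
  // {{S,+,X}<L1>,+,Y}<L2> is rewritten as {{S,+,Y}<L2>,+,X}<L1>, so the
  // recurrence over the deeper loop becomes the outermost expression.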
3444 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 3445 const Loop *NestedLoop = NestedAR->getLoop(); 3446 if (L->contains(NestedLoop) 3447 ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) 3448 : (!NestedLoop->contains(L) && 3449 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { 3450 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 3451 NestedAR->op_end()); 3452 Operands[0] = NestedAR->getStart(); 3453 // AddRecs require their operands be loop-invariant with respect to their 3454 // loops. Don't perform this transformation if it would break this 3455 // requirement. 3456 bool AllInvariant = all_of( 3457 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); }); 3458 3459 if (AllInvariant) { 3460 // Create a recurrence for the outer loop with the same step size. 3461 // 3462 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the 3463 // inner recurrence has the same property. 3464 SCEV::NoWrapFlags OuterFlags = 3465 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags()); 3466 3467 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags); 3468 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) { 3469 return isLoopInvariant(Op, NestedLoop); 3470 }); 3471 3472 if (AllInvariant) { 3473 // Ok, both add recurrences are valid after the transformation. 3474 // 3475 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if 3476 // the outer recurrence has the same property. 3477 SCEV::NoWrapFlags InnerFlags = 3478 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags); 3479 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags); 3480 } 3481 } 3482 // Reset Operands to its original state. 3483 Operands[0] = NestedAR; 3484 } 3485 } 3486 3487 // Okay, it looks like we really DO need an addrec expr. Check to see if we 3488 // already have one, otherwise create a new one. 3489 return getOrCreateAddRecExpr(Operands, L, Flags); 3490 } 3491 3492 const SCEV * 3493 ScalarEvolution::getGEPExpr(GEPOperator *GEP, 3494 const SmallVectorImpl<const SCEV *> &IndexExprs) { 3495 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand()); 3496 // getSCEV(Base)->getType() has the same address space as Base->getType() 3497 // because SCEV::getType() preserves the address space. 3498 Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType()); 3499 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP 3500 // instruction to its SCEV, because the Instruction may be guarded by control 3501 // flow and the no-overflow bits may not be valid for the expression in any 3502 // context. This can be fixed similarly to how these flags are handled for 3503 // adds. 3504 SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW 3505 : SCEV::FlagAnyWrap; 3506 3507 const SCEV *TotalOffset = getZero(IntPtrTy); 3508 // The array size is unimportant. The first thing we do on CurTy is getting 3509 // its element type. 3510 Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); 3511 for (const SCEV *IndexExpr : IndexExprs) { 3512 // Compute the (potentially symbolic) offset in bytes for this index. 3513 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3514 // For a struct, add the member offset. 3515 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3516 unsigned FieldNo = Index->getZExtValue(); 3517 const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo); 3518 3519 // Add the field offset to the running total offset. 
3520 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 3521 3522 // Update CurTy to the type of the field at Index. 3523 CurTy = STy->getTypeAtIndex(Index); 3524 } else { 3525 // Update CurTy to its element type. 3526 CurTy = cast<SequentialType>(CurTy)->getElementType(); 3527 // For an array, add the element offset, explicitly scaled. 3528 const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy); 3529 // Getelementptr indices are signed. 3530 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy); 3531 3532 // Multiply the index by the element size to compute the element offset. 3533 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap); 3534 3535 // Add the element offset to the running total offset. 3536 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 3537 } 3538 } 3539 3540 // Add the total offset from all the GEP indices to the base. 3541 return getAddExpr(BaseExpr, TotalOffset, Wrap); 3542 } 3543 3544 std::tuple<const SCEV *, FoldingSetNodeID, void *> 3545 ScalarEvolution::findExistingSCEVInCache(int SCEVType, 3546 ArrayRef<const SCEV *> Ops) { 3547 FoldingSetNodeID ID; 3548 void *IP = nullptr; 3549 ID.AddInteger(SCEVType); 3550 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3551 ID.AddPointer(Ops[i]); 3552 return std::tuple<const SCEV *, FoldingSetNodeID, void *>( 3553 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3554 } 3555 3556 const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind, 3557 SmallVectorImpl<const SCEV *> &Ops) { 3558 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3559 if (Ops.size() == 1) return Ops[0]; 3560 #ifndef NDEBUG 3561 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3562 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3563 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3564 "Operand types don't match!"); 3565 #endif 3566 3567 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3568 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3569 3570 // Sort by complexity, this groups all similar expression types together. 3571 GroupByComplexity(Ops, &LI, DT); 3572 3573 // Check if we have created the same expression before. 3574 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3575 return S; 3576 } 3577 3578 // If there are any constants, fold them together. 3579 unsigned Idx = 0; 3580 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3581 ++Idx; 3582 assert(Idx < Ops.size()); 3583 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3584 if (Kind == scSMaxExpr) 3585 return APIntOps::smax(LHS, RHS); 3586 else if (Kind == scSMinExpr) 3587 return APIntOps::smin(LHS, RHS); 3588 else if (Kind == scUMaxExpr) 3589 return APIntOps::umax(LHS, RHS); 3590 else if (Kind == scUMinExpr) 3591 return APIntOps::umin(LHS, RHS); 3592 llvm_unreachable("Unknown SCEV min/max opcode"); 3593 }; 3594 3595 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3596 // We found two constants, fold them together! 3597 ConstantInt *Fold = ConstantInt::get( 3598 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3599 Ops[0] = getConstant(Fold); 3600 Ops.erase(Ops.begin()+1); // Erase the folded element 3601 if (Ops.size() == 1) return Ops[0]; 3602 LHSC = cast<SCEVConstant>(Ops[0]); 3603 } 3604 3605 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3606 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3607 3608 if (IsMax ? IsMinV : IsMaxV) { 3609 // If we are left with a constant minimum(/maximum)-int, strip it off. 
3610 Ops.erase(Ops.begin()); 3611 --Idx; 3612 } else if (IsMax ? IsMaxV : IsMinV) { 3613 // If we have a max(/min) with a constant maximum(/minimum)-int, 3614 // it will always be the extremum. 3615 return LHSC; 3616 } 3617 3618 if (Ops.size() == 1) return Ops[0]; 3619 } 3620 3621 // Find the first operation of the same kind 3622 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3623 ++Idx; 3624 3625 // Check to see if one of the operands is of the same kind. If so, expand its 3626 // operands onto our operand list, and recurse to simplify. 3627 if (Idx < Ops.size()) { 3628 bool DeletedAny = false; 3629 while (Ops[Idx]->getSCEVType() == Kind) { 3630 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3631 Ops.erase(Ops.begin()+Idx); 3632 Ops.append(SMME->op_begin(), SMME->op_end()); 3633 DeletedAny = true; 3634 } 3635 3636 if (DeletedAny) 3637 return getMinMaxExpr(Kind, Ops); 3638 } 3639 3640 // Okay, check to see if the same value occurs in the operand list twice. If 3641 // so, delete one. Since we sorted the list, these values are required to 3642 // be adjacent. 3643 llvm::CmpInst::Predicate GEPred = 3644 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3645 llvm::CmpInst::Predicate LEPred = 3646 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3647 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3648 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3649 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3650 if (Ops[i] == Ops[i + 1] || 3651 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3652 // X op Y op Y --> X op Y 3653 // X op Y --> X, if we know X, Y are ordered appropriately 3654 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3655 --i; 3656 --e; 3657 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3658 Ops[i + 1])) { 3659 // X op Y --> Y, if we know X, Y are ordered appropriately 3660 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3661 --i; 3662 --e; 3663 } 3664 } 3665 3666 if (Ops.size() == 1) return Ops[0]; 3667 3668 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3669 3670 // Okay, it looks like we really DO need an expr. Check to see if we 3671 // already have one, otherwise create a new one. 
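  // (By this point, e.g., smax(%x, %x, 3, 7) has already been reduced to
  // smax(%x, 7) by the constant folding and duplicate elimination above;
  // illustrative operands.)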
3672 const SCEV *ExistingSCEV; 3673 FoldingSetNodeID ID; 3674 void *IP; 3675 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops); 3676 if (ExistingSCEV) 3677 return ExistingSCEV; 3678 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3679 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3680 SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr( 3681 ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size()); 3682 3683 UniqueSCEVs.InsertNode(S, IP); 3684 addToLoopUseLists(S); 3685 return S; 3686 } 3687 3688 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3689 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3690 return getSMaxExpr(Ops); 3691 } 3692 3693 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3694 return getMinMaxExpr(scSMaxExpr, Ops); 3695 } 3696 3697 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) { 3698 SmallVector<const SCEV *, 2> Ops = {LHS, RHS}; 3699 return getUMaxExpr(Ops); 3700 } 3701 3702 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 3703 return getMinMaxExpr(scUMaxExpr, Ops); 3704 } 3705 3706 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 3707 const SCEV *RHS) { 3708 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3709 return getSMinExpr(Ops); 3710 } 3711 3712 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3713 return getMinMaxExpr(scSMinExpr, Ops); 3714 } 3715 3716 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 3717 const SCEV *RHS) { 3718 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 3719 return getUMinExpr(Ops); 3720 } 3721 3722 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) { 3723 return getMinMaxExpr(scUMinExpr, Ops); 3724 } 3725 3726 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3727 // We can bypass creating a target-independent 3728 // constant expression and then folding it back into a ConstantInt. 3729 // This is just a compile-time optimization. 3730 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3731 } 3732 3733 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3734 StructType *STy, 3735 unsigned FieldNo) { 3736 // We can bypass creating a target-independent 3737 // constant expression and then folding it back into a ConstantInt. 3738 // This is just a compile-time optimization. 3739 return getConstant( 3740 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3741 } 3742 3743 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3744 // Don't attempt to do anything other than create a SCEVUnknown object 3745 // here. createSCEV only calls getUnknown after checking for all other 3746 // interesting possibilities, and any other code that calls getUnknown 3747 // is doing so in order to hide a value from SCEV canonicalization. 
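  // (Typical values wrapped as SCEVUnknown: function arguments, loads, and
  // calls -- anything SCEV treats as opaque.)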
3748 3749 FoldingSetNodeID ID; 3750 ID.AddInteger(scUnknown); 3751 ID.AddPointer(V); 3752 void *IP = nullptr; 3753 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3754 assert(cast<SCEVUnknown>(S)->getValue() == V && 3755 "Stale SCEVUnknown in uniquing map!"); 3756 return S; 3757 } 3758 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3759 FirstUnknown); 3760 FirstUnknown = cast<SCEVUnknown>(S); 3761 UniqueSCEVs.InsertNode(S, IP); 3762 return S; 3763 } 3764 3765 //===----------------------------------------------------------------------===// 3766 // Basic SCEV Analysis and PHI Idiom Recognition Code 3767 // 3768 3769 /// Test if values of the given type are analyzable within the SCEV 3770 /// framework. This primarily includes integer types, and it can optionally 3771 /// include pointer types if the ScalarEvolution class has access to 3772 /// target-specific information. 3773 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3774 // Integers and pointers are always SCEVable. 3775 return Ty->isIntOrPtrTy(); 3776 } 3777 3778 /// Return the size in bits of the specified type, for which isSCEVable must 3779 /// return true. 3780 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3781 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3782 if (Ty->isPointerTy()) 3783 return getDataLayout().getIndexTypeSizeInBits(Ty); 3784 return getDataLayout().getTypeSizeInBits(Ty); 3785 } 3786 3787 /// Return a type with the same bitwidth as the given type and which represents 3788 /// how SCEV will treat the given type, for which isSCEVable must return 3789 /// true. For pointer types, this is the pointer-sized integer type. 3790 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3791 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3792 3793 if (Ty->isIntegerTy()) 3794 return Ty; 3795 3796 // The only other support type is pointer. 3797 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3798 return getDataLayout().getIntPtrType(Ty); 3799 } 3800 3801 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3802 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2; 3803 } 3804 3805 const SCEV *ScalarEvolution::getCouldNotCompute() { 3806 return CouldNotCompute.get(); 3807 } 3808 3809 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3810 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3811 auto *SU = dyn_cast<SCEVUnknown>(S); 3812 return SU && SU->getValue() == nullptr; 3813 }); 3814 3815 return !ContainsNulls; 3816 } 3817 3818 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3819 HasRecMapType::iterator I = HasRecMap.find(S); 3820 if (I != HasRecMap.end()) 3821 return I->second; 3822 3823 bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>); 3824 HasRecMap.insert({S, FoundAddRec}); 3825 return FoundAddRec; 3826 } 3827 3828 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3829 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3830 /// offset I, then return {S', I}, else return {\p S, nullptr}. 
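/// For example (illustrative SCEV): (5 + %x) splits into {%x, 5}; anything
/// that is not a two-operand add with a leading constant is returned
/// unchanged with a null offset.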
3831 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3832 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3833 if (!Add) 3834 return {S, nullptr}; 3835 3836 if (Add->getNumOperands() != 2) 3837 return {S, nullptr}; 3838 3839 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3840 if (!ConstOp) 3841 return {S, nullptr}; 3842 3843 return {Add->getOperand(1), ConstOp->getValue()}; 3844 } 3845 3846 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3847 /// by the value and offset from any ValueOffsetPair in the set. 3848 SetVector<ScalarEvolution::ValueOffsetPair> * 3849 ScalarEvolution::getSCEVValues(const SCEV *S) { 3850 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3851 if (SI == ExprValueMap.end()) 3852 return nullptr; 3853 #ifndef NDEBUG 3854 if (VerifySCEVMap) { 3855 // Check there is no dangling Value in the set returned. 3856 for (const auto &VE : SI->second) 3857 assert(ValueExprMap.count(VE.first)); 3858 } 3859 #endif 3860 return &SI->second; 3861 } 3862 3863 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3864 /// cannot be used separately. eraseValueFromMap should be used to remove 3865 /// V from ValueExprMap and ExprValueMap at the same time. 3866 void ScalarEvolution::eraseValueFromMap(Value *V) { 3867 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3868 if (I != ValueExprMap.end()) { 3869 const SCEV *S = I->second; 3870 // Remove {V, 0} from the set of ExprValueMap[S] 3871 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S)) 3872 SV->remove({V, nullptr}); 3873 3874 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3875 const SCEV *Stripped; 3876 ConstantInt *Offset; 3877 std::tie(Stripped, Offset) = splitAddExpr(S); 3878 if (Offset != nullptr) { 3879 if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped)) 3880 SV->remove({V, Offset}); 3881 } 3882 ValueExprMap.erase(V); 3883 } 3884 } 3885 3886 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3887 /// TODO: In reality it is better to check the poison recursively 3888 /// but this is better than nothing. 3889 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { 3890 if (auto *I = dyn_cast<Instruction>(V)) { 3891 if (isa<OverflowingBinaryOperator>(I)) { 3892 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) { 3893 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap()) 3894 return true; 3895 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap()) 3896 return true; 3897 } 3898 } else if (isa<PossiblyExactOperator>(I) && I->isExact()) 3899 return true; 3900 } 3901 return false; 3902 } 3903 3904 /// Return an existing SCEV if it exists, otherwise analyze the expression and 3905 /// create a new one. 3906 const SCEV *ScalarEvolution::getSCEV(Value *V) { 3907 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3908 3909 const SCEV *S = getExistingSCEV(V); 3910 if (S == nullptr) { 3911 S = createSCEV(V); 3912 // During PHI resolution, it is possible to create two SCEVs for the same 3913 // V, so it is needed to double check whether V->S is inserted into 3914 // ValueExprMap before insert S->{V, 0} into ExprValueMap. 3915 std::pair<ValueExprMapType::iterator, bool> Pair = 3916 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 3917 if (Pair.second && !SCEVLostPoisonFlags(S, V)) { 3918 ExprValueMap[S].insert({V, nullptr}); 3919 3920 // If S == Stripped + Offset, add Stripped -> {V, Offset} into 3921 // ExprValueMap. 
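      // For example (illustrative values): if %v computes (4 + %x), record
      // %x -> {%v, 4} so that %x can later be rematerialized as (%v - 4).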
3922 const SCEV *Stripped = S; 3923 ConstantInt *Offset = nullptr; 3924 std::tie(Stripped, Offset) = splitAddExpr(S); 3925 // If stripped is SCEVUnknown, don't bother to save 3926 // Stripped -> {V, offset}. It doesn't simplify and sometimes even 3927 // increase the complexity of the expansion code. 3928 // If V is GetElementPtrInst, don't save Stripped -> {V, offset} 3929 // because it may generate add/sub instead of GEP in SCEV expansion. 3930 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 3931 !isa<GetElementPtrInst>(V)) 3932 ExprValueMap[Stripped].insert({V, Offset}); 3933 } 3934 } 3935 return S; 3936 } 3937 3938 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 3939 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 3940 3941 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3942 if (I != ValueExprMap.end()) { 3943 const SCEV *S = I->second; 3944 if (checkValidity(S)) 3945 return S; 3946 eraseValueFromMap(V); 3947 forgetMemoizedResults(S); 3948 } 3949 return nullptr; 3950 } 3951 3952 /// Return a SCEV corresponding to -V = -1*V 3953 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 3954 SCEV::NoWrapFlags Flags) { 3955 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3956 return getConstant( 3957 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 3958 3959 Type *Ty = V->getType(); 3960 Ty = getEffectiveSCEVType(Ty); 3961 return getMulExpr( 3962 V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags); 3963 } 3964 3965 /// If Expr computes ~A, return A else return nullptr 3966 static const SCEV *MatchNotExpr(const SCEV *Expr) { 3967 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 3968 if (!Add || Add->getNumOperands() != 2 || 3969 !Add->getOperand(0)->isAllOnesValue()) 3970 return nullptr; 3971 3972 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 3973 if (!AddRHS || AddRHS->getNumOperands() != 2 || 3974 !AddRHS->getOperand(0)->isAllOnesValue()) 3975 return nullptr; 3976 3977 return AddRHS->getOperand(1); 3978 } 3979 3980 /// Return a SCEV corresponding to ~V = -1-V 3981 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 3982 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 3983 return getConstant( 3984 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 3985 3986 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 3987 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 3988 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 3989 SmallVector<const SCEV *, 2> MatchedOperands; 3990 for (const SCEV *Operand : MME->operands()) { 3991 const SCEV *Matched = MatchNotExpr(Operand); 3992 if (!Matched) 3993 return (const SCEV *)nullptr; 3994 MatchedOperands.push_back(Matched); 3995 } 3996 return getMinMaxExpr( 3997 SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())), 3998 MatchedOperands); 3999 }; 4000 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4001 return Replaced; 4002 } 4003 4004 Type *Ty = V->getType(); 4005 Ty = getEffectiveSCEVType(Ty); 4006 const SCEV *AllOnes = 4007 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 4008 return getMinusSCEV(AllOnes, V); 4009 } 4010 4011 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4012 SCEV::NoWrapFlags Flags, 4013 unsigned Depth) { 4014 // Fast path: X - X --> 0. 4015 if (LHS == RHS) 4016 return getZero(LHS->getType()); 4017 4018 // We represent LHS - RHS as LHS + (-1)*RHS. 
This transformation 4019 // makes it so that we cannot make much use of NUW. 4020 auto AddFlags = SCEV::FlagAnyWrap; 4021 const bool RHSIsNotMinSigned = 4022 !getSignedRangeMin(RHS).isMinSignedValue(); 4023 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 4024 // Let M be the minimum representable signed value. Then (-1)*RHS 4025 // signed-wraps if and only if RHS is M. That can happen even for 4026 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4027 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4028 // (-1)*RHS, we need to prove that RHS != M. 4029 // 4030 // If LHS is non-negative and we know that LHS - RHS does not 4031 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4032 // either by proving that RHS > M or that LHS >= 0. 4033 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4034 AddFlags = SCEV::FlagNSW; 4035 } 4036 } 4037 4038 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4039 // RHS is NSW and LHS >= 0. 4040 // 4041 // The difficulty here is that the NSW flag may have been proven 4042 // relative to a loop that is to be found in a recurrence in LHS and 4043 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4044 // larger scope than intended. 4045 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 4046 4047 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth); 4048 } 4049 4050 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty, 4051 unsigned Depth) { 4052 Type *SrcTy = V->getType(); 4053 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4054 "Cannot truncate or zero extend with non-integer arguments!"); 4055 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4056 return V; // No conversion 4057 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 4058 return getTruncateExpr(V, Ty, Depth); 4059 return getZeroExtendExpr(V, Ty, Depth); 4060 } 4061 4062 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty, 4063 unsigned Depth) { 4064 Type *SrcTy = V->getType(); 4065 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4066 "Cannot truncate or zero extend with non-integer arguments!"); 4067 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4068 return V; // No conversion 4069 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 4070 return getTruncateExpr(V, Ty, Depth); 4071 return getSignExtendExpr(V, Ty, Depth); 4072 } 4073 4074 const SCEV * 4075 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) { 4076 Type *SrcTy = V->getType(); 4077 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4078 "Cannot noop or zero extend with non-integer arguments!"); 4079 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4080 "getNoopOrZeroExtend cannot truncate!"); 4081 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4082 return V; // No conversion 4083 return getZeroExtendExpr(V, Ty); 4084 } 4085 4086 const SCEV * 4087 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) { 4088 Type *SrcTy = V->getType(); 4089 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4090 "Cannot noop or sign extend with non-integer arguments!"); 4091 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4092 "getNoopOrSignExtend cannot truncate!"); 4093 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4094 return V; // No conversion 4095 return getSignExtendExpr(V, Ty); 4096 } 4097 4098 const SCEV * 4099 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) { 
4100 Type *SrcTy = V->getType(); 4101 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4102 "Cannot noop or any extend with non-integer arguments!"); 4103 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 4104 "getNoopOrAnyExtend cannot truncate!"); 4105 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4106 return V; // No conversion 4107 return getAnyExtendExpr(V, Ty); 4108 } 4109 4110 const SCEV * 4111 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) { 4112 Type *SrcTy = V->getType(); 4113 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() && 4114 "Cannot truncate or noop with non-integer arguments!"); 4115 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4116 "getTruncateOrNoop cannot extend!"); 4117 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4118 return V; // No conversion 4119 return getTruncateExpr(V, Ty); 4120 } 4121 4122 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4123 const SCEV *RHS) { 4124 const SCEV *PromotedLHS = LHS; 4125 const SCEV *PromotedRHS = RHS; 4126 4127 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4128 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4129 else 4130 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4131 4132 return getUMaxExpr(PromotedLHS, PromotedRHS); 4133 } 4134 4135 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4136 const SCEV *RHS) { 4137 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4138 return getUMinFromMismatchedTypes(Ops); 4139 } 4140 4141 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4142 SmallVectorImpl<const SCEV *> &Ops) { 4143 assert(!Ops.empty() && "At least one operand must be!"); 4144 // Trivial case. 4145 if (Ops.size() == 1) 4146 return Ops[0]; 4147 4148 // Find the max type first. 4149 Type *MaxType = nullptr; 4150 for (auto *S : Ops) 4151 if (MaxType) 4152 MaxType = getWiderType(MaxType, S->getType()); 4153 else 4154 MaxType = S->getType(); 4155 4156 // Extend all ops to max type. 4157 SmallVector<const SCEV *, 2> PromotedOps; 4158 for (auto *S : Ops) 4159 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4160 4161 // Generate umin. 4162 return getUMinExpr(PromotedOps); 4163 } 4164 4165 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4166 // A pointer operand may evaluate to a nonpointer expression, such as null. 4167 if (!V->getType()->isPointerTy()) 4168 return V; 4169 4170 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { 4171 return getPointerBase(Cast->getOperand()); 4172 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4173 const SCEV *PtrOp = nullptr; 4174 for (const SCEV *NAryOp : NAry->operands()) { 4175 if (NAryOp->getType()->isPointerTy()) { 4176 // Cannot find the base of an expression with multiple pointer operands. 4177 if (PtrOp) 4178 return V; 4179 PtrOp = NAryOp; 4180 } 4181 } 4182 if (!PtrOp) 4183 return V; 4184 return getPointerBase(PtrOp); 4185 } 4186 return V; 4187 } 4188 4189 /// Push users of the given Instruction onto the given Worklist. 4190 static void 4191 PushDefUseChildren(Instruction *I, 4192 SmallVectorImpl<Instruction *> &Worklist) { 4193 // Push the def-use children onto the Worklist stack. 
4194 for (User *U : I->users()) 4195 Worklist.push_back(cast<Instruction>(U)); 4196 } 4197 4198 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4199 SmallVector<Instruction *, 16> Worklist; 4200 PushDefUseChildren(PN, Worklist); 4201 4202 SmallPtrSet<Instruction *, 8> Visited; 4203 Visited.insert(PN); 4204 while (!Worklist.empty()) { 4205 Instruction *I = Worklist.pop_back_val(); 4206 if (!Visited.insert(I).second) 4207 continue; 4208 4209 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4210 if (It != ValueExprMap.end()) { 4211 const SCEV *Old = It->second; 4212 4213 // Short-circuit the def-use traversal if the symbolic name 4214 // ceases to appear in expressions. 4215 if (Old != SymName && !hasOperand(Old, SymName)) 4216 continue; 4217 4218 // SCEVUnknown for a PHI either means that it has an unrecognized 4219 // structure, it's a PHI that's in the progress of being computed 4220 // by createNodeForPHI, or it's a single-value PHI. In the first case, 4221 // additional loop trip count information isn't going to change anything. 4222 // In the second case, createNodeForPHI will perform the necessary 4223 // updates on its own when it gets to that point. In the third, we do 4224 // want to forget the SCEVUnknown. 4225 if (!isa<PHINode>(I) || 4226 !isa<SCEVUnknown>(Old) || 4227 (I != PN && Old == SymName)) { 4228 eraseValueFromMap(It->first); 4229 forgetMemoizedResults(Old); 4230 } 4231 } 4232 4233 PushDefUseChildren(I, Worklist); 4234 } 4235 } 4236 4237 namespace { 4238 4239 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start 4240 /// expression in case its Loop is L. If it is not L then 4241 /// if IgnoreOtherLoops is true then use AddRec itself 4242 /// otherwise rewrite cannot be done. 4243 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 4244 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> { 4245 public: 4246 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 4247 bool IgnoreOtherLoops = true) { 4248 SCEVInitRewriter Rewriter(L, SE); 4249 const SCEV *Result = Rewriter.visit(S); 4250 if (Rewriter.hasSeenLoopVariantSCEVUnknown()) 4251 return SE.getCouldNotCompute(); 4252 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops 4253 ? SE.getCouldNotCompute() 4254 : Result; 4255 } 4256 4257 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4258 if (!SE.isLoopInvariant(Expr, L)) 4259 SeenLoopVariantSCEVUnknown = true; 4260 return Expr; 4261 } 4262 4263 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4264 // Only re-write AddRecExprs for this loop. 4265 if (Expr->getLoop() == L) 4266 return Expr->getStart(); 4267 SeenOtherLoops = true; 4268 return Expr; 4269 } 4270 4271 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4272 4273 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4274 4275 private: 4276 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4277 : SCEVRewriteVisitor(SE), L(L) {} 4278 4279 const Loop *L; 4280 bool SeenLoopVariantSCEVUnknown = false; 4281 bool SeenOtherLoops = false; 4282 }; 4283 4284 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post 4285 /// increment expression in case its Loop is L. If it is not L then 4286 /// use AddRec itself. 4287 /// If SCEV contains non-invariant unknown SCEV rewrite cannot be done. 
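/// For example (illustrative SCEV): rewriting {0,+,4}<L> for L yields
/// {4,+,4}<L>, the recurrence's value after one backedge traversal. A
/// minimal usage sketch (hypothetical caller):
///   const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, SE);
///   if (isa<SCEVCouldNotCompute>(PostInc))
///     ...; // S involved a loop-variant unknown.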
4288 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4289 public: 4290 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4291 SCEVPostIncRewriter Rewriter(L, SE); 4292 const SCEV *Result = Rewriter.visit(S); 4293 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4294 ? SE.getCouldNotCompute() 4295 : Result; 4296 } 4297 4298 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4299 if (!SE.isLoopInvariant(Expr, L)) 4300 SeenLoopVariantSCEVUnknown = true; 4301 return Expr; 4302 } 4303 4304 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4305 // Only re-write AddRecExprs for this loop. 4306 if (Expr->getLoop() == L) 4307 return Expr->getPostIncExpr(SE); 4308 SeenOtherLoops = true; 4309 return Expr; 4310 } 4311 4312 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4313 4314 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4315 4316 private: 4317 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4318 : SCEVRewriteVisitor(SE), L(L) {} 4319 4320 const Loop *L; 4321 bool SeenLoopVariantSCEVUnknown = false; 4322 bool SeenOtherLoops = false; 4323 }; 4324 4325 /// This class evaluates the compare condition by matching it against the 4326 /// condition of loop latch. If there is a match we assume a true value 4327 /// for the condition while building SCEV nodes. 4328 class SCEVBackedgeConditionFolder 4329 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4330 public: 4331 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4332 ScalarEvolution &SE) { 4333 bool IsPosBECond = false; 4334 Value *BECond = nullptr; 4335 if (BasicBlock *Latch = L->getLoopLatch()) { 4336 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4337 if (BI && BI->isConditional()) { 4338 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4339 "Both outgoing branches should not target same header!"); 4340 BECond = BI->getCondition(); 4341 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4342 } else { 4343 return S; 4344 } 4345 } 4346 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4347 return Rewriter.visit(S); 4348 } 4349 4350 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4351 const SCEV *Result = Expr; 4352 bool InvariantF = SE.isLoopInvariant(Expr, L); 4353 4354 if (!InvariantF) { 4355 Instruction *I = cast<Instruction>(Expr->getValue()); 4356 switch (I->getOpcode()) { 4357 case Instruction::Select: { 4358 SelectInst *SI = cast<SelectInst>(I); 4359 Optional<const SCEV *> Res = 4360 compareWithBackedgeCondition(SI->getCondition()); 4361 if (Res.hasValue()) { 4362 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4363 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4364 } 4365 break; 4366 } 4367 default: { 4368 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4369 if (Res.hasValue()) 4370 Result = Res.getValue(); 4371 break; 4372 } 4373 } 4374 } 4375 return Result; 4376 } 4377 4378 private: 4379 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4380 bool IsPosBECond, ScalarEvolution &SE) 4381 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4382 IsPositiveBECond(IsPosBECond) {} 4383 4384 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4385 4386 const Loop *L; 4387 /// Loop back condition. 4388 Value *BackedgeCond = nullptr; 4389 /// Set to true if loop back is on positive branch condition. 
4390 bool IsPositiveBECond; 4391 }; 4392 4393 Optional<const SCEV *> 4394 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4395 4396 // If value matches the backedge condition for loop latch, 4397 // then return a constant evolution node based on loopback 4398 // branch taken. 4399 if (BackedgeCond == IC) 4400 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4401 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4402 return None; 4403 } 4404 4405 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4406 public: 4407 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4408 ScalarEvolution &SE) { 4409 SCEVShiftRewriter Rewriter(L, SE); 4410 const SCEV *Result = Rewriter.visit(S); 4411 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4412 } 4413 4414 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4415 // Only allow AddRecExprs for this loop. 4416 if (!SE.isLoopInvariant(Expr, L)) 4417 Valid = false; 4418 return Expr; 4419 } 4420 4421 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4422 if (Expr->getLoop() == L && Expr->isAffine()) 4423 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4424 Valid = false; 4425 return Expr; 4426 } 4427 4428 bool isValid() { return Valid; } 4429 4430 private: 4431 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4432 : SCEVRewriteVisitor(SE), L(L) {} 4433 4434 const Loop *L; 4435 bool Valid = true; 4436 }; 4437 4438 } // end anonymous namespace 4439 4440 SCEV::NoWrapFlags 4441 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4442 if (!AR->isAffine()) 4443 return SCEV::FlagAnyWrap; 4444 4445 using OBO = OverflowingBinaryOperator; 4446 4447 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4448 4449 if (!AR->hasNoSignedWrap()) { 4450 ConstantRange AddRecRange = getSignedRange(AR); 4451 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4452 4453 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4454 Instruction::Add, IncRange, OBO::NoSignedWrap); 4455 if (NSWRegion.contains(AddRecRange)) 4456 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4457 } 4458 4459 if (!AR->hasNoUnsignedWrap()) { 4460 ConstantRange AddRecRange = getUnsignedRange(AR); 4461 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4462 4463 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4464 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4465 if (NUWRegion.contains(AddRecRange)) 4466 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4467 } 4468 4469 return Result; 4470 } 4471 4472 namespace { 4473 4474 /// Represents an abstract binary operation. This may exist as a 4475 /// normal instruction or constant expression, or may have been 4476 /// derived from an expression tree. 4477 struct BinaryOp { 4478 unsigned Opcode; 4479 Value *LHS; 4480 Value *RHS; 4481 bool IsNSW = false; 4482 bool IsNUW = false; 4483 4484 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4485 /// constant expression. 
4486   Operator *Op = nullptr;
4487 
4488   explicit BinaryOp(Operator *Op)
4489       : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4490         Op(Op) {
4491     if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4492       IsNSW = OBO->hasNoSignedWrap();
4493       IsNUW = OBO->hasNoUnsignedWrap();
4494     }
4495   }
4496 
4497   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4498                     bool IsNUW = false)
4499       : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4500 };
4501 
4502 } // end anonymous namespace
4503 
4504 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4505 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4506   auto *Op = dyn_cast<Operator>(V);
4507   if (!Op)
4508     return None;
4509 
4510   // Implementation detail: all the cleverness here should happen without
4511   // creating new SCEV expressions -- our caller knows tricks to avoid creating
4512   // SCEV expressions when possible, and we should not break that.
4513 
4514   switch (Op->getOpcode()) {
4515   case Instruction::Add:
4516   case Instruction::Sub:
4517   case Instruction::Mul:
4518   case Instruction::UDiv:
4519   case Instruction::URem:
4520   case Instruction::And:
4521   case Instruction::Or:
4522   case Instruction::AShr:
4523   case Instruction::Shl:
4524     return BinaryOp(Op);
4525 
4526   case Instruction::Xor:
4527     if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4528       // If the RHS of the xor is a signmask, then this is just an add.
4529       // Instcombine turns add of signmask into xor as a strength reduction step.
4530       if (RHSC->getValue().isSignMask())
4531         return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4532     return BinaryOp(Op);
4533 
4534   case Instruction::LShr:
4535     // Turn logical shift right by a constant into an unsigned divide.
4536     if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4537       uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4538 
4539       // If the shift count is not less than the bitwidth, the result of
4540       // the shift is undefined. Don't try to analyze it, because the
4541       // resolution chosen here may differ from the resolution chosen in
4542       // other parts of the compiler.
4543       if (SA->getValue().ult(BitWidth)) {
4544         Constant *X =
4545             ConstantInt::get(SA->getContext(),
4546                              APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4547         return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4548       }
4549     }
4550     return BinaryOp(Op);
4551 
4552   case Instruction::ExtractValue: {
4553     auto *EVI = cast<ExtractValueInst>(Op);
4554     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4555       break;
4556 
4557     auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4558     if (!WO)
4559       break;
4560 
4561     Instruction::BinaryOps BinOp = WO->getBinaryOp();
4562     bool Signed = WO->isSigned();
4563     // TODO: Should add nuw/nsw flags for mul as well.
4564     if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4565       return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4566 
4567     // Now that we know that all uses of the arithmetic-result component of
4568     // WO are guarded by the overflow check, we can go ahead and pretend
4569     // that the arithmetic is non-overflowing.
4570     return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4571                     /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4572   }
4573 
4574   default:
4575     break;
4576   }
4577 
4578   return None;
4579 }
4580 
4581 /// Helper function to createAddRecFromPHIWithCasts.
We have a phi 4582 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4583 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4584 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4585 /// follows one of the following patterns: 4586 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4587 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4588 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4589 /// we return the type of the truncation operation, and indicate whether the 4590 /// truncated type should be treated as signed/unsigned by setting 4591 /// \p Signed to true/false, respectively. 4592 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4593 bool &Signed, ScalarEvolution &SE) { 4594 // The case where Op == SymbolicPHI (that is, with no type conversions on 4595 // the way) is handled by the regular add recurrence creating logic and 4596 // would have already been triggered in createAddRecForPHI. Reaching it here 4597 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4598 // because one of the other operands of the SCEVAddExpr updating this PHI is 4599 // not invariant). 4600 // 4601 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4602 // this case predicates that allow us to prove that Op == SymbolicPHI will 4603 // be added. 4604 if (Op == SymbolicPHI) 4605 return nullptr; 4606 4607 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4608 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4609 if (SourceBits != NewBits) 4610 return nullptr; 4611 4612 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4613 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4614 if (!SExt && !ZExt) 4615 return nullptr; 4616 const SCEVTruncateExpr *Trunc = 4617 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4618 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4619 if (!Trunc) 4620 return nullptr; 4621 const SCEV *X = Trunc->getOperand(); 4622 if (X != SymbolicPHI) 4623 return nullptr; 4624 Signed = SExt != nullptr; 4625 return Trunc->getType(); 4626 } 4627 4628 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4629 if (!PN->getType()->isIntegerTy()) 4630 return nullptr; 4631 const Loop *L = LI.getLoopFor(PN->getParent()); 4632 if (!L || L->getHeader() != PN->getParent()) 4633 return nullptr; 4634 return L; 4635 } 4636 4637 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4638 // computation that updates the phi follows the following pattern: 4639 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4640 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4641 // If so, try to see if it can be rewritten as an AddRecExpr under some 4642 // Predicates. If successful, return them as a pair. Also cache the results 4643 // of the analysis. 4644 // 4645 // Example usage scenario: 4646 // Say the Rewriter is called for the following SCEV: 4647 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4648 // where: 4649 // %X = phi i64 (%Start, %BEValue) 4650 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4651 // and call this function with %SymbolicPHI = %X. 
4652 //
4653 // The analysis will find that the value coming around the backedge has
4654 // the following SCEV:
4655 //     BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4656 // Upon concluding that this matches the desired pattern, the function
4657 // will return the pair {NewAddRec, SmallPredsVec} where:
4658 //     NewAddRec = {%Start,+,%Step}
4659 //     SmallPredsVec = {P1, P2, P3} as follows:
4660 //       P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4661 //       P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4662 //       P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4663 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4664 // under the predicates {P1,P2,P3}.
4665 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4666 //     PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4667 //
4668 // TODOs:
4669 //
4670 // 1) Extend the Induction descriptor to also support inductions that involve
4671 //    casts: When needed (namely, when we are called in the context of the
4672 //    vectorizer induction analysis), a Set of cast instructions will be
4673 //    populated by this method, and provided back to isInductionPHI. This is
4674 //    needed to allow the vectorizer to properly record them to be ignored by
4675 //    the cost model and to avoid vectorizing them (otherwise these casts,
4676 //    which are redundant under the runtime overflow checks, will be
4677 //    vectorized, which can be costly).
4678 //
4679 // 2) Support additional induction/PHISCEV patterns: We also want to support
4680 //    inductions where the sext-trunc / zext-trunc operations (partly) occur
4681 //    after the induction update operation (the induction increment):
4682 //
4683 //      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4684 //    which correspond to a phi->add->trunc->sext/zext->phi update chain.
4685 //
4686 //      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4687 //    which correspond to a phi->trunc->add->sext/zext->phi update chain.
4688 //
4689 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4690 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4691 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4692   SmallVector<const SCEVPredicate *, 3> Predicates;
4693
4694   // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4695   // return an AddRec expression under some predicate.
4696
4697   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4698   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4699   assert(L && "Expecting an integer loop header phi");
4700
4701   // The loop may have multiple entrances or multiple exits; we can analyze
4702   // this phi as an addrec if it has a unique entry value and a unique
4703   // backedge value.
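  // For example (illustrative IR), a header phi such as
  //   %x = phi i64 [ %start, %preheader ], [ %bevalue, %latch ]
  // has the unique entry value %start and the unique backedge value
  // %bevalue; if two in-loop edges carried different values, the loop
  // below would give up.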
4704 Value *BEValueV = nullptr, *StartValueV = nullptr; 4705 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4706 Value *V = PN->getIncomingValue(i); 4707 if (L->contains(PN->getIncomingBlock(i))) { 4708 if (!BEValueV) { 4709 BEValueV = V; 4710 } else if (BEValueV != V) { 4711 BEValueV = nullptr; 4712 break; 4713 } 4714 } else if (!StartValueV) { 4715 StartValueV = V; 4716 } else if (StartValueV != V) { 4717 StartValueV = nullptr; 4718 break; 4719 } 4720 } 4721 if (!BEValueV || !StartValueV) 4722 return None; 4723 4724 const SCEV *BEValue = getSCEV(BEValueV); 4725 4726 // If the value coming around the backedge is an add with the symbolic 4727 // value we just inserted, possibly with casts that we can ignore under 4728 // an appropriate runtime guard, then we found a simple induction variable! 4729 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4730 if (!Add) 4731 return None; 4732 4733 // If there is a single occurrence of the symbolic value, possibly 4734 // casted, replace it with a recurrence. 4735 unsigned FoundIndex = Add->getNumOperands(); 4736 Type *TruncTy = nullptr; 4737 bool Signed; 4738 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4739 if ((TruncTy = 4740 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4741 if (FoundIndex == e) { 4742 FoundIndex = i; 4743 break; 4744 } 4745 4746 if (FoundIndex == Add->getNumOperands()) 4747 return None; 4748 4749 // Create an add with everything but the specified operand. 4750 SmallVector<const SCEV *, 8> Ops; 4751 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4752 if (i != FoundIndex) 4753 Ops.push_back(Add->getOperand(i)); 4754 const SCEV *Accum = getAddExpr(Ops); 4755 4756 // The runtime checks will not be valid if the step amount is 4757 // varying inside the loop. 4758 if (!isLoopInvariant(Accum, L)) 4759 return None; 4760 4761 // *** Part2: Create the predicates 4762 4763 // Analysis was successful: we have a phi-with-cast pattern for which we 4764 // can return an AddRec expression under the following predicates: 4765 // 4766 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4767 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4768 // P2: An Equal predicate that guarantees that 4769 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4770 // P3: An Equal predicate that guarantees that 4771 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4772 // 4773 // As we next prove, the above predicates guarantee that: 4774 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4775 // 4776 // 4777 // More formally, we want to prove that: 4778 // Expr(i+1) = Start + (i+1) * Accum 4779 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4780 // 4781 // Given that: 4782 // 1) Expr(0) = Start 4783 // 2) Expr(1) = Start + Accum 4784 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4785 // 3) Induction hypothesis (step i): 4786 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4787 // 4788 // Proof: 4789 // Expr(i+1) = 4790 // = Start + (i+1)*Accum 4791 // = (Start + i*Accum) + Accum 4792 // = Expr(i) + Accum 4793 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4794 // :: from step i 4795 // 4796 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4797 // 4798 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4799 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4800 // + Accum :: from P3 4801 // 4802 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4803 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4804 // 4805 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4806 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4807 // 4808 // By induction, the same applies to all iterations 1<=i<n: 4809 // 4810 4811 // Create a truncated addrec for which we will add a no overflow check (P1). 4812 const SCEV *StartVal = getSCEV(StartValueV); 4813 const SCEV *PHISCEV = 4814 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4815 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4816 4817 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4818 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4819 // will be constant. 4820 // 4821 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4822 // add P1. 4823 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 4824 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 4825 Signed ? SCEVWrapPredicate::IncrementNSSW 4826 : SCEVWrapPredicate::IncrementNUSW; 4827 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 4828 Predicates.push_back(AddRecPred); 4829 } 4830 4831 // Create the Equal Predicates P2,P3: 4832 4833 // It is possible that the predicates P2 and/or P3 are computable at 4834 // compile time due to StartVal and/or Accum being constants. 4835 // If either one is, then we can check that now and escape if either P2 4836 // or P3 is false. 4837 4838 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 4839 // for each of StartVal and Accum 4840 auto getExtendedExpr = [&](const SCEV *Expr, 4841 bool CreateSignExtend) -> const SCEV * { 4842 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 4843 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 4844 const SCEV *ExtendedExpr = 4845 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
4846                          : getZeroExtendExpr(TruncatedExpr, Expr->getType());
4847     return ExtendedExpr;
4848   };
4849
4850   // Given:
4851   //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
4852   //                = getExtendedExpr(Expr)
4853   // Determine whether the predicate P: Expr == ExtendedExpr
4854   // is known to be false at compile time.
4855   auto PredIsKnownFalse = [&](const SCEV *Expr,
4856                               const SCEV *ExtendedExpr) -> bool {
4857     return Expr != ExtendedExpr &&
4858            isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
4859   };
4860
4861   const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
4862   if (PredIsKnownFalse(StartVal, StartExtended)) {
4863     LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
4864     return None;
4865   }
4866
4867   // The Step is always Signed (because the overflow checks are either
4868   // NSSW or NUSW).
4869   const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
4870   if (PredIsKnownFalse(Accum, AccumExtended)) {
4871     LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
4872     return None;
4873   }
4874
4875   auto AppendPredicate = [&](const SCEV *Expr,
4876                              const SCEV *ExtendedExpr) -> void {
4877     if (Expr != ExtendedExpr &&
4878         !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
4879       const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
4880       LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
4881       Predicates.push_back(Pred);
4882     }
4883   };
4884
4885   AppendPredicate(StartVal, StartExtended);
4886   AppendPredicate(Accum, AccumExtended);
4887
4888   // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
4889   // which the casts have been folded away. The caller can rewrite SymbolicPHI
4890   // into NewAR if it will also add the runtime overflow checks specified in
4891   // Predicates.
4892   auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
4893
4894   std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
4895       std::make_pair(NewAR, Predicates);
4896   // Remember the result of the analysis for this SCEV at this location.
4897   PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
4898   return PredRewrite;
4899 }
4900
4901 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4902 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
4903   auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4904   const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4905   if (!L)
4906     return None;
4907
4908   // Check to see if we already analyzed this PHI.
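  // The cache stores, per {SymbolicPHI, Loop} key, either the rewritten
  // AddRec together with its predicates or, as a failure marker, the
  // SymbolicPHI itself with an empty predicate list (see below).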
4909   auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
4910   if (I != PredicatedSCEVRewrites.end()) {
4911     std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
4912         I->second;
4913     // Analysis was done before and failed to create an AddRec:
4914     if (Rewrite.first == SymbolicPHI)
4915       return None;
4916     // Analysis was done before and succeeded in creating an AddRec under
4917     // a predicate:
4918     assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
4919     assert(!(Rewrite.second).empty() && "Expected to find Predicates");
4920     return Rewrite;
4921   }
4922
4923   Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4924       Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
4925
4926   // Record in the cache that the analysis failed.
4927   if (!Rewrite) {
4928     SmallVector<const SCEVPredicate *, 3> Predicates;
4929     PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
4930     return None;
4931   }
4932
4933   return Rewrite;
4934 }
4935
4936 // FIXME: This utility is currently required because the Rewriter
4937 // does not rewrite this expression:
4938 //   {0, +, (sext ix (trunc iy to ix) to iy)}
4939 // into {0, +, %step},
4940 // even when the following Equal predicate exists:
4941 //   "%step == (sext ix (trunc iy to ix) to iy)".
4942 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
4943     const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
4944   if (AR1 == AR2)
4945     return true;
4946
4947   auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
4948     if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
4949         !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
4950       return false;
4951     return true;
4952   };
4953
4954   if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
4955       !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
4956     return false;
4957   return true;
4958 }
4959
4960 /// A helper function for createAddRecFromPHI to handle simple cases.
4961 ///
4962 /// This function tries to find an AddRec expression for the simplest (yet most
4963 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
4964 /// If it fails, createAddRecFromPHI will use a more general, but slow,
4965 /// technique for finding the AddRec expression.
4966 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
4967                                                       Value *BEValueV,
4968                                                       Value *StartValueV) {
4969   const Loop *L = LI.getLoopFor(PN->getParent());
4970   assert(L && L->getHeader() == PN->getParent());
4971   assert(BEValueV && StartValueV);
4972
4973   auto BO = MatchBinaryOp(BEValueV, DT);
4974   if (!BO)
4975     return nullptr;
4976
4977   if (BO->Opcode != Instruction::Add)
4978     return nullptr;
4979
4980   const SCEV *Accum = nullptr;
4981   if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
4982     Accum = getSCEV(BO->RHS);
4983   else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
4984     Accum = getSCEV(BO->LHS);
4985
4986   if (!Accum)
4987     return nullptr;
4988
4989   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
4990   if (BO->IsNUW)
4991     Flags = setFlags(Flags, SCEV::FlagNUW);
4992   if (BO->IsNSW)
4993     Flags = setFlags(Flags, SCEV::FlagNSW);
4994
4995   const SCEV *StartVal = getSCEV(StartValueV);
4996   const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
4997
4998   ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
4999
5000   // We can add Flags to the post-inc expression only if we
5001   // know that it is *undefined behavior* for BEValueV to
5002   // overflow.
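  // For instance, for a phi whose SCEV is {0,+,1}<nuw><L>, the post-inc
  // expression built below is {1,+,1}<nuw><L>; creating it here simply
  // records those flags on the post-inc SCEV up front.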
5003   if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5004     if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5005       (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5006
5007   return PHISCEV;
5008 }
5009
5010 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5011   const Loop *L = LI.getLoopFor(PN->getParent());
5012   if (!L || L->getHeader() != PN->getParent())
5013     return nullptr;
5014
5015   // The loop may have multiple entrances or multiple exits; we can analyze
5016   // this phi as an addrec if it has a unique entry value and a unique
5017   // backedge value.
5018   Value *BEValueV = nullptr, *StartValueV = nullptr;
5019   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5020     Value *V = PN->getIncomingValue(i);
5021     if (L->contains(PN->getIncomingBlock(i))) {
5022       if (!BEValueV) {
5023         BEValueV = V;
5024       } else if (BEValueV != V) {
5025         BEValueV = nullptr;
5026         break;
5027       }
5028     } else if (!StartValueV) {
5029       StartValueV = V;
5030     } else if (StartValueV != V) {
5031       StartValueV = nullptr;
5032       break;
5033     }
5034   }
5035   if (!BEValueV || !StartValueV)
5036     return nullptr;
5037
5038   assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5039          "PHI node already processed?");
5040
5041   // First, try to find an AddRec expression without creating a fictitious symbolic
5042   // value for PN.
5043   if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5044     return S;
5045
5046   // Handle PHI node value symbolically.
5047   const SCEV *SymbolicName = getUnknown(PN);
5048   ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5049
5050   // Using this symbolic name for the PHI, analyze the value coming around
5051   // the back-edge.
5052   const SCEV *BEValue = getSCEV(BEValueV);
5053
5054   // NOTE: If BEValue is loop invariant, we know that the PHI node just
5055   // has a special value for the first iteration of the loop.
5056
5057   // If the value coming around the backedge is an add with the symbolic
5058   // value we just inserted, then we found a simple induction variable!
5059   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5060     // If there is a single occurrence of the symbolic value, replace it
5061     // with a recurrence.
5062     unsigned FoundIndex = Add->getNumOperands();
5063     for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5064       if (Add->getOperand(i) == SymbolicName)
5065         if (FoundIndex == e) {
5066           FoundIndex = i;
5067           break;
5068         }
5069
5070     if (FoundIndex != Add->getNumOperands()) {
5071       // Create an add with everything but the specified operand.
5072       SmallVector<const SCEV *, 8> Ops;
5073       for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5074         if (i != FoundIndex)
5075           Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5076                                                              L, *this));
5077       const SCEV *Accum = getAddExpr(Ops);
5078
5079       // This is not a valid addrec if the step amount is varying each
5080       // loop iteration, but is not itself an addrec in this loop.
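      // E.g. Accum may be this loop's own induction variable, say {0,+,1}<L>,
      // giving PN a quadratic evolution, but an addrec of some unrelated loop
      // is rejected by the check below.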
5081       if (isLoopInvariant(Accum, L) ||
5082           (isa<SCEVAddRecExpr>(Accum) &&
5083            cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5084         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5085
5086         if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5087           if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5088             if (BO->IsNUW)
5089               Flags = setFlags(Flags, SCEV::FlagNUW);
5090             if (BO->IsNSW)
5091               Flags = setFlags(Flags, SCEV::FlagNSW);
5092           }
5093         } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5094           // If the increment is an inbounds GEP, then we know the address
5095           // space cannot be wrapped around. We cannot make any guarantee
5096           // about signed or unsigned overflow because pointers are
5097           // unsigned, but we may have a negative index from the base
5098           // pointer. We can guarantee that no unsigned wrap occurs if the
5099           // indices form a positive value.
5100           if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5101             Flags = setFlags(Flags, SCEV::FlagNW);
5102
5103             const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5104             if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5105               Flags = setFlags(Flags, SCEV::FlagNUW);
5106           }
5107
5108           // We cannot transfer nuw and nsw flags from subtraction
5109           // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5110           // for instance.
5111         }
5112
5113         const SCEV *StartVal = getSCEV(StartValueV);
5114         const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5115
5116         // Okay, for the entire analysis of this edge we assumed the PHI
5117         // to be symbolic. We now need to go back and purge all of the
5118         // entries for the scalars that use the symbolic expression.
5119         forgetSymbolicName(PN, SymbolicName);
5120         ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5121
5122         // We can add Flags to the post-inc expression only if we
5123         // know that it is *undefined behavior* for BEValueV to
5124         // overflow.
5125         if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5126           if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5127             (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5128
5129         return PHISCEV;
5130       }
5131     }
5132   } else {
5133     // Otherwise, this could be a loop like this:
5134     //   i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
5135     // In this case, j = {1,+,1} and BEValue is j.
5136     // Because the other in-value of i (0) fits the evolution of BEValue,
5137     // i really is an addrec evolution.
5138     //
5139     // We can generalize this by saying that i is the shifted value of BEValue
5140     // by one iteration:
5141     //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
5142     const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5143     const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5144     if (Shifted != getCouldNotCompute() &&
5145         Start != getCouldNotCompute()) {
5146       const SCEV *StartVal = getSCEV(StartValueV);
5147       if (Start == StartVal) {
5148         // Okay, for the entire analysis of this edge we assumed the PHI
5149         // to be symbolic. We now need to go back and purge all of the
5150         // entries for the scalars that use the symbolic expression.
5151         forgetSymbolicName(PN, SymbolicName);
5152         ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5153         return Shifted;
5154       }
5155     }
5156   }
5157
5158   // Remove the temporary PHI node SCEV that has been inserted while intending
5159   // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5160   // as it would prevent later (possibly simpler) SCEV expressions from being
5161   // added to the ValueExprMap.
5162   eraseValueFromMap(PN);
5163
5164   return nullptr;
5165 }
5166
5167 // Checks if the SCEV S is available at BB. S is considered available at BB
5168 // if S can be materialized at BB without introducing a fault.
5169 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5170                                BasicBlock *BB) {
5171   struct CheckAvailable {
5172     bool TraversalDone = false;
5173     bool Available = true;
5174
5175     const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
5176     BasicBlock *BB = nullptr;
5177     DominatorTree &DT;
5178
5179     CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5180         : L(L), BB(BB), DT(DT) {}
5181
5182     bool setUnavailable() {
5183       TraversalDone = true;
5184       Available = false;
5185       return false;
5186     }
5187
5188     bool follow(const SCEV *S) {
5189       switch (S->getSCEVType()) {
5190       case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
5191       case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
5192       case scUMinExpr:
5193       case scSMinExpr:
5194         // These expressions are available if their operands are.
5195         return true;
5196
5197       case scAddRecExpr: {
5198         // We allow add recurrences that are for the loop BB is in, or for some
5199         // outer loop. This guarantees availability because the value of the
5200         // add recurrence at BB is simply the "current" value of the induction
5201         // variable. We can relax this in the future; for instance an add
5202         // recurrence for a dominating sibling loop is also available at BB.
5203         const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5204         if (L && (ARLoop == L || ARLoop->contains(L)))
5205           return true;
5206
5207         return setUnavailable();
5208       }
5209
5210       case scUnknown: {
5211         // For SCEVUnknown, we check for simple dominance.
5212         const auto *SU = cast<SCEVUnknown>(S);
5213         Value *V = SU->getValue();
5214
5215         if (isa<Argument>(V))
5216           return false;
5217
5218         if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5219           return false;
5220
5221         return setUnavailable();
5222       }
5223
5224       case scUDivExpr:
5225       case scCouldNotCompute:
5226         // We do not try to be smart about these at all.
5227         return setUnavailable();
5228       }
5229       llvm_unreachable("switch should be fully covered!");
5230     }
5231
5232     bool isDone() { return TraversalDone; }
5233   };
5234
5235   CheckAvailable CA(L, BB, DT);
5236   SCEVTraversal<CheckAvailable> ST(CA);
5237
5238   ST.visitAll(S);
5239   return CA.Available;
5240 }
5241
5242 // Try to match a control flow sequence that branches out at BI and merges back
5243 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5244 // match.
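// A conditional branch whose two successors are the same block (e.g.
// "br i1 %c, label %m, label %m") is rejected here: neither edge is then a
// single edge, and the phi's incoming values could not be attributed to a
// single value of the condition.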
5245 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5246 Value *&C, Value *&LHS, Value *&RHS) { 5247 C = BI->getCondition(); 5248 5249 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5250 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5251 5252 if (!LeftEdge.isSingleEdge()) 5253 return false; 5254 5255 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5256 5257 Use &LeftUse = Merge->getOperandUse(0); 5258 Use &RightUse = Merge->getOperandUse(1); 5259 5260 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5261 LHS = LeftUse; 5262 RHS = RightUse; 5263 return true; 5264 } 5265 5266 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5267 LHS = RightUse; 5268 RHS = LeftUse; 5269 return true; 5270 } 5271 5272 return false; 5273 } 5274 5275 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5276 auto IsReachable = 5277 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5278 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5279 const Loop *L = LI.getLoopFor(PN->getParent()); 5280 5281 // We don't want to break LCSSA, even in a SCEV expression tree. 5282 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5283 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5284 return nullptr; 5285 5286 // Try to match 5287 // 5288 // br %cond, label %left, label %right 5289 // left: 5290 // br label %merge 5291 // right: 5292 // br label %merge 5293 // merge: 5294 // V = phi [ %x, %left ], [ %y, %right ] 5295 // 5296 // as "select %cond, %x, %y" 5297 5298 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5299 assert(IDom && "At least the entry block should dominate PN"); 5300 5301 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5302 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5303 5304 if (BI && BI->isConditional() && 5305 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5306 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5307 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5308 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5309 } 5310 5311 return nullptr; 5312 } 5313 5314 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5315 if (const SCEV *S = createAddRecFromPHI(PN)) 5316 return S; 5317 5318 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5319 return S; 5320 5321 // If the PHI has a single incoming value, follow that value, unless the 5322 // PHI's incoming blocks are in a different loop, in which case doing so 5323 // risks breaking LCSSA form. Instcombine would normally zap these, but 5324 // it doesn't have DominatorTree information, so it may miss cases. 5325 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5326 if (LI.replacementPreservesLCSSAForm(PN, V)) 5327 return getSCEV(V); 5328 5329 // If it's not a loop phi, we can't handle it yet. 5330 return getUnknown(PN); 5331 } 5332 5333 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5334 Value *Cond, 5335 Value *TrueVal, 5336 Value *FalseVal) { 5337 // Handle "constant" branch or select. This can occur for instance when a 5338 // loop pass transforms an inner loop and moves on to process the outer loop. 5339 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5340 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5341 5342 // Try to match some simple smax or umax patterns. 
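  // The LDiff/RDiff comparisons below factor a common addend out of both
  // select arms: e.g. for "a >s b ? a+x : b+x", LA - LS and RA - RS both
  // equal x, so the whole select is smax(a, b) + x.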
5343 auto *ICI = dyn_cast<ICmpInst>(Cond); 5344 if (!ICI) 5345 return getUnknown(I); 5346 5347 Value *LHS = ICI->getOperand(0); 5348 Value *RHS = ICI->getOperand(1); 5349 5350 switch (ICI->getPredicate()) { 5351 case ICmpInst::ICMP_SLT: 5352 case ICmpInst::ICMP_SLE: 5353 std::swap(LHS, RHS); 5354 LLVM_FALLTHROUGH; 5355 case ICmpInst::ICMP_SGT: 5356 case ICmpInst::ICMP_SGE: 5357 // a >s b ? a+x : b+x -> smax(a, b)+x 5358 // a >s b ? b+x : a+x -> smin(a, b)+x 5359 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5360 const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType()); 5361 const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType()); 5362 const SCEV *LA = getSCEV(TrueVal); 5363 const SCEV *RA = getSCEV(FalseVal); 5364 const SCEV *LDiff = getMinusSCEV(LA, LS); 5365 const SCEV *RDiff = getMinusSCEV(RA, RS); 5366 if (LDiff == RDiff) 5367 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 5368 LDiff = getMinusSCEV(LA, RS); 5369 RDiff = getMinusSCEV(RA, LS); 5370 if (LDiff == RDiff) 5371 return getAddExpr(getSMinExpr(LS, RS), LDiff); 5372 } 5373 break; 5374 case ICmpInst::ICMP_ULT: 5375 case ICmpInst::ICMP_ULE: 5376 std::swap(LHS, RHS); 5377 LLVM_FALLTHROUGH; 5378 case ICmpInst::ICMP_UGT: 5379 case ICmpInst::ICMP_UGE: 5380 // a >u b ? a+x : b+x -> umax(a, b)+x 5381 // a >u b ? b+x : a+x -> umin(a, b)+x 5382 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5383 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5384 const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5385 const SCEV *LA = getSCEV(TrueVal); 5386 const SCEV *RA = getSCEV(FalseVal); 5387 const SCEV *LDiff = getMinusSCEV(LA, LS); 5388 const SCEV *RDiff = getMinusSCEV(RA, RS); 5389 if (LDiff == RDiff) 5390 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 5391 LDiff = getMinusSCEV(LA, RS); 5392 RDiff = getMinusSCEV(RA, LS); 5393 if (LDiff == RDiff) 5394 return getAddExpr(getUMinExpr(LS, RS), LDiff); 5395 } 5396 break; 5397 case ICmpInst::ICMP_NE: 5398 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5399 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5400 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5401 const SCEV *One = getOne(I->getType()); 5402 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5403 const SCEV *LA = getSCEV(TrueVal); 5404 const SCEV *RA = getSCEV(FalseVal); 5405 const SCEV *LDiff = getMinusSCEV(LA, LS); 5406 const SCEV *RDiff = getMinusSCEV(RA, One); 5407 if (LDiff == RDiff) 5408 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5409 } 5410 break; 5411 case ICmpInst::ICMP_EQ: 5412 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5413 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5414 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5415 const SCEV *One = getOne(I->getType()); 5416 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5417 const SCEV *LA = getSCEV(TrueVal); 5418 const SCEV *RA = getSCEV(FalseVal); 5419 const SCEV *LDiff = getMinusSCEV(LA, One); 5420 const SCEV *RDiff = getMinusSCEV(RA, LS); 5421 if (LDiff == RDiff) 5422 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5423 } 5424 break; 5425 default: 5426 break; 5427 } 5428 5429 return getUnknown(I); 5430 } 5431 5432 /// Expand GEP instructions into add and multiply operations. This allows them 5433 /// to be analyzed by regular SCEV code. 5434 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5435 // Don't attempt to analyze GEPs over unsized objects. 
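  // (Each index into a sized type scales by a compile-time constant -- e.g.
  // an i32 element contributes 4 * index to the address under a typical
  // DataLayout -- which is what getGEPExpr below expands into mul and add
  // expressions; an unsized element type has no such scale.)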
5436 if (!GEP->getSourceElementType()->isSized()) 5437 return getUnknown(GEP); 5438 5439 SmallVector<const SCEV *, 4> IndexExprs; 5440 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5441 IndexExprs.push_back(getSCEV(*Index)); 5442 return getGEPExpr(GEP, IndexExprs); 5443 } 5444 5445 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5446 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5447 return C->getAPInt().countTrailingZeros(); 5448 5449 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5450 return std::min(GetMinTrailingZeros(T->getOperand()), 5451 (uint32_t)getTypeSizeInBits(T->getType())); 5452 5453 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5454 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5455 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5456 ? getTypeSizeInBits(E->getType()) 5457 : OpRes; 5458 } 5459 5460 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5461 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5462 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5463 ? getTypeSizeInBits(E->getType()) 5464 : OpRes; 5465 } 5466 5467 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5468 // The result is the min of all operands results. 5469 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5470 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5471 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5472 return MinOpRes; 5473 } 5474 5475 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5476 // The result is the sum of all operands results. 5477 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5478 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5479 for (unsigned i = 1, e = M->getNumOperands(); 5480 SumOpRes != BitWidth && i != e; ++i) 5481 SumOpRes = 5482 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5483 return SumOpRes; 5484 } 5485 5486 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5487 // The result is the min of all operands results. 5488 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5489 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5490 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5491 return MinOpRes; 5492 } 5493 5494 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5495 // The result is the min of all operands results. 5496 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5497 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5498 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5499 return MinOpRes; 5500 } 5501 5502 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5503 // The result is the min of all operands results. 5504 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5505 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5506 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5507 return MinOpRes; 5508 } 5509 5510 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5511 // For a SCEVUnknown, ask ValueTracking. 
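    // E.g. a value known to be a multiple of 8 has at least 3 known trailing
    // zero bits.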
5512 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5513 return Known.countMinTrailingZeros(); 5514 } 5515 5516 // SCEVUDivExpr 5517 return 0; 5518 } 5519 5520 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5521 auto I = MinTrailingZerosCache.find(S); 5522 if (I != MinTrailingZerosCache.end()) 5523 return I->second; 5524 5525 uint32_t Result = GetMinTrailingZerosImpl(S); 5526 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5527 assert(InsertPair.second && "Should insert a new key"); 5528 return InsertPair.first->second; 5529 } 5530 5531 /// Helper method to assign a range to V from metadata present in the IR. 5532 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5533 if (Instruction *I = dyn_cast<Instruction>(V)) 5534 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5535 return getConstantRangeFromMetadata(*MD); 5536 5537 return None; 5538 } 5539 5540 /// Determine the range for a particular SCEV. If SignHint is 5541 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5542 /// with a "cleaner" unsigned (resp. signed) representation. 5543 const ConstantRange & 5544 ScalarEvolution::getRangeRef(const SCEV *S, 5545 ScalarEvolution::RangeSignHint SignHint) { 5546 DenseMap<const SCEV *, ConstantRange> &Cache = 5547 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5548 : SignedRanges; 5549 ConstantRange::PreferredRangeType RangeType = 5550 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5551 ? ConstantRange::Unsigned : ConstantRange::Signed; 5552 5553 // See if we've computed this range already. 5554 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5555 if (I != Cache.end()) 5556 return I->second; 5557 5558 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5559 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5560 5561 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5562 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5563 5564 // If the value has known zeros, the maximum value will have those known zeros 5565 // as well. 
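  // E.g. with TZ == 2 at bit width 8, the unsigned case below produces the
  // range [0, 0b11111100 + 1) = [0, 253).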
5566 uint32_t TZ = GetMinTrailingZeros(S); 5567 if (TZ != 0) { 5568 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5569 ConservativeResult = 5570 ConstantRange(APInt::getMinValue(BitWidth), 5571 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5572 else 5573 ConservativeResult = ConstantRange( 5574 APInt::getSignedMinValue(BitWidth), 5575 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5576 } 5577 5578 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5579 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5580 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5581 X = X.add(getRangeRef(Add->getOperand(i), SignHint)); 5582 return setRange(Add, SignHint, 5583 ConservativeResult.intersectWith(X, RangeType)); 5584 } 5585 5586 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5587 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5588 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5589 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5590 return setRange(Mul, SignHint, 5591 ConservativeResult.intersectWith(X, RangeType)); 5592 } 5593 5594 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5595 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5596 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5597 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5598 return setRange(SMax, SignHint, 5599 ConservativeResult.intersectWith(X, RangeType)); 5600 } 5601 5602 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5603 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5604 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5605 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5606 return setRange(UMax, SignHint, 5607 ConservativeResult.intersectWith(X, RangeType)); 5608 } 5609 5610 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5611 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5612 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5613 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5614 return setRange(SMin, SignHint, 5615 ConservativeResult.intersectWith(X, RangeType)); 5616 } 5617 5618 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5619 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5620 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5621 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5622 return setRange(UMin, SignHint, 5623 ConservativeResult.intersectWith(X, RangeType)); 5624 } 5625 5626 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5627 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5628 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5629 return setRange(UDiv, SignHint, 5630 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 5631 } 5632 5633 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 5634 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 5635 return setRange(ZExt, SignHint, 5636 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 5637 RangeType)); 5638 } 5639 5640 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 5641 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 5642 return setRange(SExt, SignHint, 5643 ConservativeResult.intersectWith(X.signExtend(BitWidth), 5644 RangeType)); 5645 } 5646 5647 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 5648 ConstantRange X = 
getRangeRef(Trunc->getOperand(), SignHint); 5649 return setRange(Trunc, SignHint, 5650 ConservativeResult.intersectWith(X.truncate(BitWidth), 5651 RangeType)); 5652 } 5653 5654 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 5655 // If there's no unsigned wrap, the value will never be less than its 5656 // initial value. 5657 if (AddRec->hasNoUnsignedWrap()) 5658 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 5659 if (!C->getValue()->isZero()) 5660 ConservativeResult = ConservativeResult.intersectWith( 5661 ConstantRange(C->getAPInt(), APInt(BitWidth, 0)), RangeType); 5662 5663 // If there's no signed wrap, and all the operands have the same sign or 5664 // zero, the value won't ever change sign. 5665 if (AddRec->hasNoSignedWrap()) { 5666 bool AllNonNeg = true; 5667 bool AllNonPos = true; 5668 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 5669 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 5670 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 5671 } 5672 if (AllNonNeg) 5673 ConservativeResult = ConservativeResult.intersectWith( 5674 ConstantRange(APInt(BitWidth, 0), 5675 APInt::getSignedMinValue(BitWidth)), RangeType); 5676 else if (AllNonPos) 5677 ConservativeResult = ConservativeResult.intersectWith( 5678 ConstantRange(APInt::getSignedMinValue(BitWidth), 5679 APInt(BitWidth, 1)), RangeType); 5680 } 5681 5682 // TODO: non-affine addrec 5683 if (AddRec->isAffine()) { 5684 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 5685 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 5686 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 5687 auto RangeFromAffine = getRangeForAffineAR( 5688 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5689 BitWidth); 5690 if (!RangeFromAffine.isFullSet()) 5691 ConservativeResult = 5692 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 5693 5694 auto RangeFromFactoring = getRangeViaFactoring( 5695 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 5696 BitWidth); 5697 if (!RangeFromFactoring.isFullSet()) 5698 ConservativeResult = 5699 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 5700 } 5701 } 5702 5703 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 5704 } 5705 5706 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5707 // Check if the IR explicitly contains !range metadata. 5708 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 5709 if (MDRange.hasValue()) 5710 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), 5711 RangeType); 5712 5713 // Split here to avoid paying the compile-time cost of calling both 5714 // computeKnownBits and ComputeNumSignBits. This restriction can be lifted 5715 // if needed. 5716 const DataLayout &DL = getDataLayout(); 5717 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { 5718 // For a SCEVUnknown, ask ValueTracking. 5719 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 5720 // If Known does not result in full-set, intersect with it. 
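      // (If no bits are known, Max is all-ones and Max + 1 wraps around to
      // Min, so the test below skips that degenerate full-set case.)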
5721       if (Known.getMinValue() != Known.getMaxValue() + 1)
5722         ConservativeResult = ConservativeResult.intersectWith(
5723             ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
5724             RangeType);
5725     } else {
5726       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
5727              "generalize as needed!");
5728       unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
5729       if (NS > 1)
5730         ConservativeResult = ConservativeResult.intersectWith(
5731             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
5732                           APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
5733             RangeType);
5734     }
5735
5736     // The range of a phi is a subset of the union of the ranges of its inputs.
5737     if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
5738       // Make sure that we do not recurse forever over cyclic phis.
5739       if (PendingPhiRanges.insert(Phi).second) {
5740         ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
5741         for (auto &Op : Phi->operands()) {
5742           auto OpRange = getRangeRef(getSCEV(Op), SignHint);
5743           RangeFromOps = RangeFromOps.unionWith(OpRange);
5744           // No point in continuing if we already have a full set.
5745           if (RangeFromOps.isFullSet())
5746             break;
5747         }
5748         ConservativeResult =
5749             ConservativeResult.intersectWith(RangeFromOps, RangeType);
5750         bool Erased = PendingPhiRanges.erase(Phi);
5751         assert(Erased && "Failed to erase Phi properly?");
5752         (void) Erased;
5753       }
5754     }
5755
5756     return setRange(U, SignHint, std::move(ConservativeResult));
5757   }
5758
5759   return setRange(S, SignHint, std::move(ConservativeResult));
5760 }
5761
5762 // Given a StartRange, Step and MaxBECount for an expression, compute a range of
5763 // values that the expression can take. Initially, the expression has a value
5764 // from StartRange and then is changed by Step up to MaxBECount times. The
5765 // Signed argument defines whether we treat Step as signed or unsigned.
5766 static ConstantRange getRangeForAffineARHelper(APInt Step,
5767                                                const ConstantRange &StartRange,
5768                                                const APInt &MaxBECount,
5769                                                unsigned BitWidth, bool Signed) {
5770   // If either Step or MaxBECount is 0, then the expression won't change, and we
5771   // just need to return the initial range.
5772   if (Step == 0 || MaxBECount == 0)
5773     return StartRange;
5774
5775   // If we don't know anything about the initial value (i.e. StartRange is
5776   // FullRange), then we don't know anything about the final range either.
5777   // Return FullRange.
5778   if (StartRange.isFullSet())
5779     return ConstantRange::getFull(BitWidth);
5780
5781   // If Step is signed and negative, then we use its absolute value, but we also
5782   // note that we're moving in the opposite direction.
5783   bool Descending = Signed && Step.isNegative();
5784
5785   if (Signed)
5786     // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
5787     //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
5788     // These equations hold true due to the well-defined wrap-around behavior of
5789     // APInt.
5790     Step = Step.abs();
5791
5792   // Check if Offset (Step * MaxBECount) exceeds the full span of BitWidth. If
5793   // it does, the expression is guaranteed to overflow.
5794   if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
5795     return ConstantRange::getFull(BitWidth);
5796
5797   // Offset is by how much the expression can change. The checks above guarantee
5798   // no overflow here.
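  // E.g. Step == 3 and MaxBECount == 4 give Offset == 12; an increasing
  // StartRange of [10, 20) then becomes [10, 31 + 1) = [10, 32) below.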
5799 APInt Offset = Step * MaxBECount; 5800 5801 // Minimum value of the final range will match the minimal value of StartRange 5802 // if the expression is increasing and will be decreased by Offset otherwise. 5803 // Maximum value of the final range will match the maximal value of StartRange 5804 // if the expression is decreasing and will be increased by Offset otherwise. 5805 APInt StartLower = StartRange.getLower(); 5806 APInt StartUpper = StartRange.getUpper() - 1; 5807 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset)) 5808 : (StartUpper + std::move(Offset)); 5809 5810 // It's possible that the new minimum/maximum value will fall into the initial 5811 // range (due to wrap around). This means that the expression can take any 5812 // value in this bitwidth, and we have to return full range. 5813 if (StartRange.contains(MovedBoundary)) 5814 return ConstantRange::getFull(BitWidth); 5815 5816 APInt NewLower = 5817 Descending ? std::move(MovedBoundary) : std::move(StartLower); 5818 APInt NewUpper = 5819 Descending ? std::move(StartUpper) : std::move(MovedBoundary); 5820 NewUpper += 1; 5821 5822 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range. 5823 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper)); 5824 } 5825 5826 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start, 5827 const SCEV *Step, 5828 const SCEV *MaxBECount, 5829 unsigned BitWidth) { 5830 assert(!isa<SCEVCouldNotCompute>(MaxBECount) && 5831 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 5832 "Precondition!"); 5833 5834 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType()); 5835 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount); 5836 5837 // First, consider step signed. 5838 ConstantRange StartSRange = getSignedRange(Start); 5839 ConstantRange StepSRange = getSignedRange(Step); 5840 5841 // If Step can be both positive and negative, we need to find ranges for the 5842 // maximum absolute step values in both directions and union them. 5843 ConstantRange SR = 5844 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange, 5845 MaxBECountValue, BitWidth, /* Signed = */ true); 5846 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(), 5847 StartSRange, MaxBECountValue, 5848 BitWidth, /* Signed = */ true)); 5849 5850 // Next, consider step unsigned. 5851 ConstantRange UR = getRangeForAffineARHelper( 5852 getUnsignedRangeMax(Step), getUnsignedRange(Start), 5853 MaxBECountValue, BitWidth, /* Signed = */ false); 5854 5855 // Finally, intersect signed and unsigned ranges. 5856 return SR.intersectWith(UR, ConstantRange::Smallest); 5857 } 5858 5859 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 5860 const SCEV *Step, 5861 const SCEV *MaxBECount, 5862 unsigned BitWidth) { 5863 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 5864 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 5865 5866 struct SelectPattern { 5867 Value *Condition = nullptr; 5868 APInt TrueValue; 5869 APInt FalseValue; 5870 5871 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 5872 const SCEV *S) { 5873 Optional<unsigned> CastOp; 5874 APInt Offset(BitWidth, 0); 5875 5876 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 5877 "Should be!"); 5878 5879 // Peel off a constant offset: 5880 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 5881 // In the future we could consider being smarter here and handle 5882 // {Start+Step,+,Step} too. 
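        // For now, only the canonical (Const + X) shape is peeled: e.g. for
        // S == (5 + %sel), Offset becomes 5 and S becomes %sel.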
5883 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 5884 return; 5885 5886 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 5887 S = SA->getOperand(1); 5888 } 5889 5890 // Peel off a cast operation 5891 if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) { 5892 CastOp = SCast->getSCEVType(); 5893 S = SCast->getOperand(); 5894 } 5895 5896 using namespace llvm::PatternMatch; 5897 5898 auto *SU = dyn_cast<SCEVUnknown>(S); 5899 const APInt *TrueVal, *FalseVal; 5900 if (!SU || 5901 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 5902 m_APInt(FalseVal)))) { 5903 Condition = nullptr; 5904 return; 5905 } 5906 5907 TrueValue = *TrueVal; 5908 FalseValue = *FalseVal; 5909 5910 // Re-apply the cast we peeled off earlier 5911 if (CastOp.hasValue()) 5912 switch (*CastOp) { 5913 default: 5914 llvm_unreachable("Unknown SCEV cast type!"); 5915 5916 case scTruncate: 5917 TrueValue = TrueValue.trunc(BitWidth); 5918 FalseValue = FalseValue.trunc(BitWidth); 5919 break; 5920 case scZeroExtend: 5921 TrueValue = TrueValue.zext(BitWidth); 5922 FalseValue = FalseValue.zext(BitWidth); 5923 break; 5924 case scSignExtend: 5925 TrueValue = TrueValue.sext(BitWidth); 5926 FalseValue = FalseValue.sext(BitWidth); 5927 break; 5928 } 5929 5930 // Re-apply the constant offset we peeled off earlier 5931 TrueValue += Offset; 5932 FalseValue += Offset; 5933 } 5934 5935 bool isRecognized() { return Condition != nullptr; } 5936 }; 5937 5938 SelectPattern StartPattern(*this, BitWidth, Start); 5939 if (!StartPattern.isRecognized()) 5940 return ConstantRange::getFull(BitWidth); 5941 5942 SelectPattern StepPattern(*this, BitWidth, Step); 5943 if (!StepPattern.isRecognized()) 5944 return ConstantRange::getFull(BitWidth); 5945 5946 if (StartPattern.Condition != StepPattern.Condition) { 5947 // We don't handle this case today; but we could, by considering four 5948 // possibilities below instead of two. I'm not sure if there are cases where 5949 // that will help over what getRange already does, though. 5950 return ConstantRange::getFull(BitWidth); 5951 } 5952 5953 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 5954 // construct arbitrary general SCEV expressions here. This function is called 5955 // from deep in the call stack, and calling getSCEV (on a sext instruction, 5956 // say) can end up caching a suboptimal value. 5957 5958 // FIXME: without the explicit `this` receiver below, MSVC errors out with 5959 // C2352 and C2512 (otherwise it isn't needed). 5960 5961 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 5962 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 5963 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 5964 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 5965 5966 ConstantRange TrueRange = 5967 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 5968 ConstantRange FalseRange = 5969 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 5970 5971 return TrueRange.unionWith(FalseRange); 5972 } 5973 5974 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 5975 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 5976 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 5977 5978 // Return early if there are no flags to propagate to the SCEV. 
5979   SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5980   if (BinOp->hasNoUnsignedWrap())
5981     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
5982   if (BinOp->hasNoSignedWrap())
5983     Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
5984   if (Flags == SCEV::FlagAnyWrap)
5985     return SCEV::FlagAnyWrap;
5986
5987   return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
5988 }
5989
5990 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
5991   // Here we check that I is in the header of the innermost loop containing I,
5992   // since we only deal with instructions in the loop header. The actual loop we
5993   // need to check later will come from an add recurrence, but getting that
5994   // requires computing the SCEV of the operands, which can be expensive. We can
5995   // do this check cheaply to rule out some cases early.
5996   Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
5997   if (InnermostContainingLoop == nullptr ||
5998       InnermostContainingLoop->getHeader() != I->getParent())
5999     return false;
6000
6001   // Only proceed if we can prove that I does not yield poison.
6002   if (!programUndefinedIfFullPoison(I))
6003     return false;
6004
6005   // At this point we know that if I is executed, then it does not wrap
6006   // according to at least one of NSW or NUW. If I is not executed, then we do
6007   // not know if the calculation that I represents would wrap. Multiple
6008   // instructions can map to the same SCEV. If we apply NSW or NUW from I to
6009   // the SCEV, we must guarantee no wrapping for that SCEV also when it is
6010   // derived from other instructions that map to the same SCEV. We cannot make
6011   // that guarantee for cases where I is not executed. So we need to find the
6012   // loop that I is considered in relation to and prove that I is executed for
6013   // every iteration of that loop. That implies that the value that I
6014   // calculates does not wrap anywhere in the loop, so then we can apply the
6015   // flags to the SCEV.
6016   //
6017   // We check isLoopInvariant to disambiguate in case we are adding recurrences
6018   // from different loops, so that we know which loop to prove that I is
6019   // executed in.
6020   for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
6021     // I could be an extractvalue from a call to an overflow intrinsic.
6022     // TODO: We can do better here in some cases.
6023     if (!isSCEVable(I->getOperand(OpIndex)->getType()))
6024       return false;
6025     const SCEV *Op = getSCEV(I->getOperand(OpIndex));
6026     if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
6027       bool AllOtherOpsLoopInvariant = true;
6028       for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
6029            ++OtherOpIndex) {
6030         if (OtherOpIndex != OpIndex) {
6031           const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
6032           if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
6033             AllOtherOpsLoopInvariant = false;
6034             break;
6035           }
6036         }
6037       }
6038       if (AllOtherOpsLoopInvariant &&
6039           isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
6040         return true;
6041     }
6042   }
6043   return false;
6044 }
6045
6046 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
6047   // If we know that \c I can never be poison, period, then that's enough.
6048   if (isSCEVExprNeverPoison(I))
6049     return true;
6050
6051   // For an add recurrence specifically, we assume that infinite loops without
6052   // side effects are undefined behavior, and then reason as follows:
6053   //
6054   // If the add recurrence is poison in any iteration, it is poison on all
6055   // future iterations (since incrementing poison yields poison). If the result
6056   // of the add recurrence is fed into the loop latch condition and the loop
6057   // does not contain any throws or exiting blocks other than the latch, we now
6058   // have the ability to "choose" whether the backedge is taken or not (by
6059   // choosing a sufficiently evil value for the poison feeding into the branch)
6060   // for every iteration including and after the one in which \p I first became
6061   // poison. There are two possibilities (let's call the iteration in which \p
6062   // I first became poison K):
6063   //
6064   //  1. In the set of iterations including and after K, the loop body executes
6065   //     no side effects. In this case executing the backedge an infinite number
6066   //     of times will yield undefined behavior.
6067   //
6068   //  2. In the set of iterations including and after K, the loop body executes
6069   //     at least one side effect. In this case, that specific instance of side
6070   //     effect is control dependent on poison, which also yields undefined
6071   //     behavior.
6072
6073   auto *ExitingBB = L->getExitingBlock();
6074   auto *LatchBB = L->getLoopLatch();
6075   if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6076     return false;
6077
6078   SmallPtrSet<const Instruction *, 16> Pushed;
6079   SmallVector<const Instruction *, 8> PoisonStack;
6080
6081   // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6082   // things that are known to be fully poison under that assumption go on the
6083   // PoisonStack.
6084   Pushed.insert(I);
6085   PoisonStack.push_back(I);
6086
6087   bool LatchControlDependentOnPoison = false;
6088   while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6089     const Instruction *Poison = PoisonStack.pop_back_val();
6090
6091     for (auto *PoisonUser : Poison->users()) {
6092       if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
6093         if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6094           PoisonStack.push_back(cast<Instruction>(PoisonUser));
6095       } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6096         assert(BI->isConditional() && "Only possibility!");
6097         if (BI->getParent() == LatchBB) {
6098           LatchControlDependentOnPoison = true;
6099           break;
6100         }
6101       }
6102     }
6103   }
6104
6105   return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6106 }
6107
6108 ScalarEvolution::LoopProperties
6109 ScalarEvolution::getLoopProperties(const Loop *L) {
6110   using LoopProperties = ScalarEvolution::LoopProperties;
6111
6112   auto Itr = LoopPropertiesCache.find(L);
6113   if (Itr == LoopPropertiesCache.end()) {
6114     auto HasSideEffects = [](Instruction *I) {
6115       if (auto *SI = dyn_cast<StoreInst>(I))
6116         return !SI->isSimple();
6117
6118       return I->mayHaveSideEffects();
6119     };
6120
6121     LoopProperties LP = {/* HasNoAbnormalExits */ true,
6122                          /*HasNoSideEffects*/ true};
6123
6124     for (auto *BB : L->getBlocks())
6125       for (auto &I : *BB) {
6126         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6127           LP.HasNoAbnormalExits = false;
6128         if (HasSideEffects(&I))
6129           LP.HasNoSideEffects = false;
6130         if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6131           break; // We're already as pessimistic as we can get.
6132 } 6133 6134 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6135 assert(InsertPair.second && "We just checked!"); 6136 Itr = InsertPair.first; 6137 } 6138 6139 return Itr->second; 6140 } 6141 6142 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6143 if (!isSCEVable(V->getType())) 6144 return getUnknown(V); 6145 6146 if (Instruction *I = dyn_cast<Instruction>(V)) { 6147 // Don't attempt to analyze instructions in blocks that aren't 6148 // reachable. Such instructions don't matter, and they aren't required 6149 // to obey basic rules for definitions dominating uses which this 6150 // analysis depends on. 6151 if (!DT.isReachableFromEntry(I->getParent())) 6152 return getUnknown(UndefValue::get(V->getType())); 6153 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6154 return getConstant(CI); 6155 else if (isa<ConstantPointerNull>(V)) 6156 return getZero(V->getType()); 6157 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6158 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6159 else if (!isa<ConstantExpr>(V)) 6160 return getUnknown(V); 6161 6162 Operator *U = cast<Operator>(V); 6163 if (auto BO = MatchBinaryOp(U, DT)) { 6164 switch (BO->Opcode) { 6165 case Instruction::Add: { 6166 // The simple thing to do would be to just call getSCEV on both operands 6167 // and call getAddExpr with the result. However if we're looking at a 6168 // bunch of things all added together, this can be quite inefficient, 6169 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6170 // Instead, gather up all the operands and make a single getAddExpr call. 6171 // LLVM IR canonical form means we need only traverse the left operands. 6172 SmallVector<const SCEV *, 4> AddOps; 6173 do { 6174 if (BO->Op) { 6175 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6176 AddOps.push_back(OpSCEV); 6177 break; 6178 } 6179 6180 // If a NUW or NSW flag can be applied to the SCEV for this 6181 // addition, then compute the SCEV for this addition by itself 6182 // with a separate call to getAddExpr. We need to do that 6183 // instead of pushing the operands of the addition onto AddOps, 6184 // since the flags are only known to apply to this particular 6185 // addition - they may not apply to other additions that can be 6186 // formed with operands from AddOps. 
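          // (Hedged example: for `%s = add nsw i32 %a, %b` we build
          // getAddExpr(getSCEV(%a), getSCEV(%b), FlagNSW) as one unit below;
          // pushing %a and %b onto AddOps instead would let them mix with
          // operands of other additions that carry no such guarantee.)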
6187 const SCEV *RHS = getSCEV(BO->RHS); 6188 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6189 if (Flags != SCEV::FlagAnyWrap) { 6190 const SCEV *LHS = getSCEV(BO->LHS); 6191 if (BO->Opcode == Instruction::Sub) 6192 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6193 else 6194 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6195 break; 6196 } 6197 } 6198 6199 if (BO->Opcode == Instruction::Sub) 6200 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6201 else 6202 AddOps.push_back(getSCEV(BO->RHS)); 6203 6204 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6205 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6206 NewBO->Opcode != Instruction::Sub)) { 6207 AddOps.push_back(getSCEV(BO->LHS)); 6208 break; 6209 } 6210 BO = NewBO; 6211 } while (true); 6212 6213 return getAddExpr(AddOps); 6214 } 6215 6216 case Instruction::Mul: { 6217 SmallVector<const SCEV *, 4> MulOps; 6218 do { 6219 if (BO->Op) { 6220 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6221 MulOps.push_back(OpSCEV); 6222 break; 6223 } 6224 6225 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6226 if (Flags != SCEV::FlagAnyWrap) { 6227 MulOps.push_back( 6228 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6229 break; 6230 } 6231 } 6232 6233 MulOps.push_back(getSCEV(BO->RHS)); 6234 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6235 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6236 MulOps.push_back(getSCEV(BO->LHS)); 6237 break; 6238 } 6239 BO = NewBO; 6240 } while (true); 6241 6242 return getMulExpr(MulOps); 6243 } 6244 case Instruction::UDiv: 6245 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6246 case Instruction::URem: 6247 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6248 case Instruction::Sub: { 6249 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6250 if (BO->Op) 6251 Flags = getNoWrapFlagsFromUB(BO->Op); 6252 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6253 } 6254 case Instruction::And: 6255 // For an expression like x&255 that merely masks off the high bits, 6256 // use zext(trunc(x)) as the SCEV expression. 6257 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6258 if (CI->isZero()) 6259 return getSCEV(BO->RHS); 6260 if (CI->isMinusOne()) 6261 return getSCEV(BO->LHS); 6262 const APInt &A = CI->getValue(); 6263 6264 // Instcombine's ShrinkDemandedConstant may strip bits out of 6265 // constants, obscuring what would otherwise be a low-bits mask. 6266 // Use computeKnownBits to compute what ShrinkDemandedConstant 6267 // knew about to reconstruct a low-bits mask value. 6268 unsigned LZ = A.countLeadingZeros(); 6269 unsigned TZ = A.countTrailingZeros(); 6270 unsigned BitWidth = A.getBitWidth(); 6271 KnownBits Known(BitWidth); 6272 computeKnownBits(BO->LHS, Known, getDataLayout(), 6273 0, &AC, nullptr, &DT); 6274 6275 APInt EffectiveMask = 6276 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6277 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6278 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6279 const SCEV *LHS = getSCEV(BO->LHS); 6280 const SCEV *ShiftedLHS = nullptr; 6281 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6282 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6283 // For an expression like (x * 8) & 8, simplify the multiply. 
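            // (Worked example, illustrative: for i32 `(x * 8) & 8` we get
            // TZ = 3 and MulZeros = 3, so GCD = 3 and DivAmt = 1; the multiply
            // folds away and the overall result becomes
            // (zext (trunc x to i1) to i32) * 8.)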
6284 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6285 unsigned GCD = std::min(MulZeros, TZ); 6286 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6287 SmallVector<const SCEV*, 4> MulOps; 6288 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6289 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6290 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6291 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6292 } 6293 } 6294 if (!ShiftedLHS) 6295 ShiftedLHS = getUDivExpr(LHS, MulCount); 6296 return getMulExpr( 6297 getZeroExtendExpr( 6298 getTruncateExpr(ShiftedLHS, 6299 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6300 BO->LHS->getType()), 6301 MulCount); 6302 } 6303 } 6304 break; 6305 6306 case Instruction::Or: 6307 // If the RHS of the Or is a constant, we may have something like: 6308 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6309 // optimizations will transparently handle this case. 6310 // 6311 // In order for this transformation to be safe, the LHS must be of the 6312 // form X*(2^n) and the Or constant must be less than 2^n. 6313 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6314 const SCEV *LHS = getSCEV(BO->LHS); 6315 const APInt &CIVal = CI->getValue(); 6316 if (GetMinTrailingZeros(LHS) >= 6317 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6318 // Build a plain add SCEV. 6319 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 6320 // If the LHS of the add was an addrec and it has no-wrap flags, 6321 // transfer the no-wrap flags, since an or won't introduce a wrap. 6322 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 6323 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 6324 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( 6325 OldAR->getNoWrapFlags()); 6326 } 6327 return S; 6328 } 6329 } 6330 break; 6331 6332 case Instruction::Xor: 6333 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6334 // If the RHS of xor is -1, then this is a not operation. 6335 if (CI->isMinusOne()) 6336 return getNotSCEV(getSCEV(BO->LHS)); 6337 6338 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6339 // This is a variant of the check for xor with -1, and it handles 6340 // the case where instcombine has trimmed non-demanded bits out 6341 // of an xor with -1. 6342 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6343 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6344 if (LBO->getOpcode() == Instruction::And && 6345 LCI->getValue() == CI->getValue()) 6346 if (const SCEVZeroExtendExpr *Z = 6347 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6348 Type *UTy = BO->LHS->getType(); 6349 const SCEV *Z0 = Z->getOperand(); 6350 Type *Z0Ty = Z0->getType(); 6351 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6352 6353 // If C is a low-bits mask, the zero extend is serving to 6354 // mask off the high bits. Complement the operand and 6355 // re-apply the zext. 6356 if (CI->getValue().isMask(Z0TySize)) 6357 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6358 6359 // If C is a single bit, it may be in the sign-bit position 6360 // before the zero-extend. In this case, represent the xor 6361 // using an add, which is equivalent, and re-apply the zext. 
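              // (Worked example, illustrative: for i32 C = 128 over an i8 Z0,
              // Trunc below is 0x80, the i8 sign mask, and `xor (zext Z0), 128`
              // equals `zext (add Z0, 128)` because adding the sign bit flips
              // it and any carry falls out of the i8 type.)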
6362             APInt Trunc = CI->getValue().trunc(Z0TySize);
6363             if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6364                 Trunc.isSignMask())
6365               return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6366                                        UTy);
6367           }
6368     }
6369     break;
6370
6371   case Instruction::Shl:
6372     // Turn shift left of a constant amount into a multiply.
6373     if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6374       uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6375
6376       // If the shift count is not less than the bitwidth, the result of
6377       // the shift is undefined. Don't try to analyze it, because the
6378       // resolution chosen here may differ from the resolution chosen in
6379       // other parts of the compiler.
6380       if (SA->getValue().uge(BitWidth))
6381         break;
6382
6383       // It is currently not resolved how to interpret NSW for left
6384       // shift by BitWidth - 1, so we avoid applying flags in that
6385       // case. Remove this check (or this comment) once the situation
6386       // is resolved. See
6387       // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
6388       // and http://reviews.llvm.org/D8890 .
6389       auto Flags = SCEV::FlagAnyWrap;
6390       if (BO->Op && SA->getValue().ult(BitWidth - 1))
6391         Flags = getNoWrapFlagsFromUB(BO->Op);
6392
6393       Constant *X = ConstantInt::get(
6394           getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6395       return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6396     }
6397     break;
6398
6399   case Instruction::AShr: {
6400     // AShr X, C, where C is a constant.
6401     ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6402     if (!CI)
6403       break;
6404
6405     Type *OuterTy = BO->LHS->getType();
6406     uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6407     // If the shift count is not less than the bitwidth, the result of
6408     // the shift is undefined. Don't try to analyze it, because the
6409     // resolution chosen here may differ from the resolution chosen in
6410     // other parts of the compiler.
6411     if (CI->getValue().uge(BitWidth))
6412       break;
6413
6414     if (CI->isZero())
6415       return getSCEV(BO->LHS); // shift by zero --> noop
6416
6417     uint64_t AShrAmt = CI->getZExtValue();
6418     Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6419
6420     Operator *L = dyn_cast<Operator>(BO->LHS);
6421     if (L && L->getOpcode() == Instruction::Shl) {
6422       // X = Shl A, n
6423       // Y = AShr X, m
6424       // Both n and m are constant.
6425
6426       const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6427       if (L->getOperand(1) == BO->RHS)
6428         // For a two-shift sext-inreg, i.e. n = m,
6429         // use sext(trunc(x)) as the SCEV expression.
6430         return getSignExtendExpr(
6431             getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6432
6433       ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6434       if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6435         uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6436         if (ShlAmt > AShrAmt) {
6437           // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6438           // expression. We already checked that ShlAmt < BitWidth, so
6439           // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy since
6440           // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
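          // (Illustrative instance: for i32 with n = ShlAmt = 5 and
          // m = AShrAmt = 2, `(x << 5) ashr 2` is modeled below as
          // sext(mul(trunc(x to i30), 8) to i32).)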
6441           APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6442                                           ShlAmt - AShrAmt);
6443           return getSignExtendExpr(
6444               getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6445                          getConstant(Mul)), OuterTy);
6446         }
6447       }
6448     }
6449     break;
6450   }
6451   }
6452   }
6453
6454   switch (U->getOpcode()) {
6455   case Instruction::Trunc:
6456     return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6457
6458   case Instruction::ZExt:
6459     return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6460
6461   case Instruction::SExt:
6462     if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6463       // The NSW flag of a subtract does not always survive the conversion to
6464       // A + (-1)*B. By pushing sign extension onto its operands we are much
6465       // more likely to preserve NSW and allow later AddRec optimisations.
6466       //
6467       // NOTE: This is effectively duplicating this logic from getSignExtend:
6468       //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6469       // but by that point the NSW information has potentially been lost.
6470       if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6471         Type *Ty = U->getType();
6472         auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6473         auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6474         return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6475       }
6476     }
6477     return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6478
6479   case Instruction::BitCast:
6480     // BitCasts are no-op casts so we just eliminate the cast.
6481     if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6482       return getSCEV(U->getOperand(0));
6483     break;
6484
6485   // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
6486   // lead to pointer expressions which cannot safely be expanded to GEPs,
6487   // because ScalarEvolution doesn't respect the GEP aliasing rules when
6488   // simplifying integer expressions.
6489
6490   case Instruction::GetElementPtr:
6491     return createNodeForGEP(cast<GEPOperator>(U));
6492
6493   case Instruction::PHI:
6494     return createNodeForPHI(cast<PHINode>(U));
6495
6496   case Instruction::Select:
6497     // U can also be a select constant expr, which we let fall through. Since
6498     // createNodeForSelectOrPHI only works for a condition that is an `ICmpInst`,
6499     // and constant expressions cannot have instructions as operands, we'd have
6500     // returned getUnknown for a select constant expression anyway.
6501     if (isa<Instruction>(U))
6502       return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6503                                       U->getOperand(1), U->getOperand(2));
6504     break;
6505
6506   case Instruction::Call:
6507   case Instruction::Invoke:
6508     if (Value *RV = CallSite(U).getReturnedArgOperand())
6509       return getSCEV(RV);
6510     break;
6511   }
6512
6513   return getUnknown(V);
6514 }
6515
6516 //===----------------------------------------------------------------------===//
6517 //                   Iteration Count Computation Code
6518 //
6519
6520 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
6521   if (!ExitCount)
6522     return 0;
6523
6524   ConstantInt *ExitConst = ExitCount->getValue();
6525
6526   // Guard against huge trip counts.
6527   if (ExitConst->getValue().getActiveBits() > 32)
6528     return 0;
6529
6530   // In case of integer overflow, this returns 0, which is correct.
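  // (Example: a backedge-taken count of 7 yields a trip count of 8, while an
  // i32 backedge-taken count of -1 passes the guard above and wraps to 0,
  // which callers already treat as "no constant trip count".)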
6531   return ((unsigned)ExitConst->getZExtValue()) + 1;
6532 }
6533
6534 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
6535   if (BasicBlock *ExitingBB = L->getExitingBlock())
6536     return getSmallConstantTripCount(L, ExitingBB);
6537
6538   // No trip count information for multiple exits.
6539   return 0;
6540 }
6541
6542 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
6543                                                     BasicBlock *ExitingBlock) {
6544   assert(ExitingBlock && "Must pass a non-null exiting block!");
6545   assert(L->isLoopExiting(ExitingBlock) &&
6546          "Exiting block must actually branch out of the loop!");
6547   const SCEVConstant *ExitCount =
6548       dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
6549   return getConstantTripCount(ExitCount);
6550 }
6551
6552 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
6553   const auto *MaxExitCount =
6554       dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
6555   return getConstantTripCount(MaxExitCount);
6556 }
6557
6558 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
6559   if (BasicBlock *ExitingBB = L->getExitingBlock())
6560     return getSmallConstantTripMultiple(L, ExitingBB);
6561
6562   // No trip multiple information for multiple exits.
6563   return 0;
6564 }
6565
6566 /// Returns the largest constant divisor of the trip count of this loop as a
6567 /// normal unsigned value, if possible. This means that the actual trip count is
6568 /// always a multiple of the returned value (don't forget the trip count could
6569 /// very well be zero as well!).
6570 ///
6571 /// Returns 1 if the trip count is unknown or not guaranteed to be a
6572 /// multiple of a constant (which is also the case if the trip count is simply
6573 /// a constant; use getSmallConstantTripCount for that case). It will also
6574 /// return 1 if the trip count is very large (>= 2^32).
6575 ///
6576 /// As explained in the comments for getSmallConstantTripCount, this assumes
6577 /// that control exits the loop via ExitingBlock.
6578 unsigned
6579 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
6580                                               BasicBlock *ExitingBlock) {
6581   assert(ExitingBlock && "Must pass a non-null exiting block!");
6582   assert(L->isLoopExiting(ExitingBlock) &&
6583          "Exiting block must actually branch out of the loop!");
6584   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
6585   if (ExitCount == getCouldNotCompute())
6586     return 1;
6587
6588   // Get the trip count from the BE count by adding 1.
6589   const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
6590
6591   const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
6592   if (!TC)
6593     // Attempt to factor more general cases. Returns the greatest power of
6594     // two divisor. If overflow happens, the trip count expression is still
6595     // divisible by the greatest power of 2 divisor returned.
6596     return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
6597
6598   ConstantInt *Result = TC->getValue();
6599
6600   // Guard against huge trip counts (this requires checking
6601   // for zero to handle the case where the trip count == -1 and the
6602   // addition wraps).
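  // (Illustrative: a backedge-taken count of i32 -1 makes TCExpr fold to 0,
  // so Result has zero active bits and we conservatively return 1 here.)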
6603   if (!Result || Result->getValue().getActiveBits() > 32 ||
6604       Result->getValue().getActiveBits() == 0)
6605     return 1;
6606
6607   return (unsigned)Result->getZExtValue();
6608 }
6609
6610 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
6611                                           BasicBlock *ExitingBlock,
6612                                           ExitCountKind Kind) {
6613   switch (Kind) {
6614   case Exact:
6615     return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
6616   case ConstantMaximum:
6617     return getBackedgeTakenInfo(L).getMax(ExitingBlock, this);
6618   }
6619   llvm_unreachable("Invalid ExitCountKind!");
6620 }
6621
6622 const SCEV *
6623 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
6624                                                  SCEVUnionPredicate &Preds) {
6625   return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
6626 }
6627
6628 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
6629                                                    ExitCountKind Kind) {
6630   switch (Kind) {
6631   case Exact:
6632     return getBackedgeTakenInfo(L).getExact(L, this);
6633   case ConstantMaximum:
6634     return getBackedgeTakenInfo(L).getMax(this);
6635   }
6636   llvm_unreachable("Invalid ExitCountKind!");
6637 }
6638
6639 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
6640   return getBackedgeTakenInfo(L).isMaxOrZero(this);
6641 }
6642
6643 /// Push PHI nodes in the header of the given loop onto the given Worklist.
6644 static void
6645 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
6646   BasicBlock *Header = L->getHeader();
6647
6648   // Push all Loop-header PHIs onto the Worklist stack.
6649   for (PHINode &PN : Header->phis())
6650     Worklist.push_back(&PN);
6651 }
6652
6653 const ScalarEvolution::BackedgeTakenInfo &
6654 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
6655   auto &BTI = getBackedgeTakenInfo(L);
6656   if (BTI.hasFullInfo())
6657     return BTI;
6658
6659   auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6660
6661   if (!Pair.second)
6662     return Pair.first->second;
6663
6664   BackedgeTakenInfo Result =
6665       computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
6666
6667   return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
6668 }
6669
6670 const ScalarEvolution::BackedgeTakenInfo &
6671 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
6672   // Initially insert an invalid entry for this loop. If the insertion
6673   // succeeds, proceed to actually compute a backedge-taken count and
6674   // update the value. The temporary CouldNotCompute value tells SCEV
6675   // code elsewhere that it shouldn't attempt to request a new
6676   // backedge-taken count, which could result in infinite recursion.
6677   std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
6678       BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
6679   if (!Pair.second)
6680     return Pair.first->second;
6681
6682   // computeBackedgeTakenCount may allocate memory for its result. Inserting it
6683   // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
6684   // must be cleared in this scope.
6685   BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
6686
6687   // In a product build, the statistics are unused.
6688   (void)NumTripCountsComputed;
6689   (void)NumTripCountsNotComputed;
6690 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
6691   const SCEV *BEExact = Result.getExact(L, this);
6692   if (BEExact != getCouldNotCompute()) {
6693     assert(isLoopInvariant(BEExact, L) &&
6694            isLoopInvariant(Result.getMax(this), L) &&
6695            "Computed backedge-taken count isn't loop invariant for loop!");
6696     ++NumTripCountsComputed;
6697   }
6698   else if (Result.getMax(this) == getCouldNotCompute() &&
6699            isa<PHINode>(L->getHeader()->begin())) {
6700     // Only count loops that have phi nodes as not being computable.
6701     ++NumTripCountsNotComputed;
6702   }
6703 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
6704
6705   // Now that we know more about the trip count for this loop, forget any
6706   // existing SCEV values for PHI nodes in this loop since they are only
6707   // conservative estimates made without the benefit of trip count
6708   // information. This is similar to the code in forgetLoop, except that
6709   // it handles SCEVUnknown PHI nodes specially.
6710   if (Result.hasAnyInfo()) {
6711     SmallVector<Instruction *, 16> Worklist;
6712     PushLoopPHIs(L, Worklist);
6713
6714     SmallPtrSet<Instruction *, 8> Discovered;
6715     while (!Worklist.empty()) {
6716       Instruction *I = Worklist.pop_back_val();
6717
6718       ValueExprMapType::iterator It =
6719           ValueExprMap.find_as(static_cast<Value *>(I));
6720       if (It != ValueExprMap.end()) {
6721         const SCEV *Old = It->second;
6722
6723         // SCEVUnknown for a PHI either means that it has an unrecognized
6724         // structure, or it's a PHI that's in the process of being computed
6725         // by createNodeForPHI. In the former case, additional loop trip
6726         // count information isn't going to change anything. In the latter
6727         // case, createNodeForPHI will perform the necessary updates on its
6728         // own when it gets to that point.
6729         if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
6730           eraseValueFromMap(It->first);
6731           forgetMemoizedResults(Old);
6732         }
6733         if (PHINode *PN = dyn_cast<PHINode>(I))
6734           ConstantEvolutionLoopExitValue.erase(PN);
6735       }
6736
6737       // Since we don't need to invalidate anything for correctness and we're
6738       // only invalidating to make SCEV's results more precise, we get to stop
6739       // early to avoid invalidating too much. This is especially important in
6740       // cases like:
6741       //
6742       //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
6743       // loop0:
6744       //   %pn0 = phi
6745       //   ...
6746       // loop1:
6747       //   %pn1 = phi
6748       //   ...
6749       //
6750       // where the backedge-taken counts of both loop0 and loop1 use the SCEV
6751       // expression for %v. If we don't have the early stop below, then in cases
6752       // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
6753       // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
6754       // count for loop1, effectively nullifying SCEV's trip count cache.
6755       for (auto *U : I->users())
6756         if (auto *I = dyn_cast<Instruction>(U)) {
6757           auto *LoopForUser = LI.getLoopFor(I->getParent());
6758           if (LoopForUser && L->contains(LoopForUser) &&
6759               Discovered.insert(I).second)
6760             Worklist.push_back(I);
6761         }
6762     }
6763   }
6764
6765   // Re-lookup the insert position, since the call to
6766   // computeBackedgeTakenCount above could result in a
6767   // recursive call to getBackedgeTakenInfo (on a different
6768   // loop), which would invalidate the iterator computed
6769   // earlier.
6770 return BackedgeTakenCounts.find(L)->second = std::move(Result); 6771 } 6772 6773 void ScalarEvolution::forgetAllLoops() { 6774 // This method is intended to forget all info about loops. It should 6775 // invalidate caches as if the following happened: 6776 // - The trip counts of all loops have changed arbitrarily 6777 // - Every llvm::Value has been updated in place to produce a different 6778 // result. 6779 BackedgeTakenCounts.clear(); 6780 PredicatedBackedgeTakenCounts.clear(); 6781 LoopPropertiesCache.clear(); 6782 ConstantEvolutionLoopExitValue.clear(); 6783 ValueExprMap.clear(); 6784 ValuesAtScopes.clear(); 6785 LoopDispositions.clear(); 6786 BlockDispositions.clear(); 6787 UnsignedRanges.clear(); 6788 SignedRanges.clear(); 6789 ExprValueMap.clear(); 6790 HasRecMap.clear(); 6791 MinTrailingZerosCache.clear(); 6792 PredicatedSCEVRewrites.clear(); 6793 } 6794 6795 void ScalarEvolution::forgetLoop(const Loop *L) { 6796 // Drop any stored trip count value. 6797 auto RemoveLoopFromBackedgeMap = 6798 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 6799 auto BTCPos = Map.find(L); 6800 if (BTCPos != Map.end()) { 6801 BTCPos->second.clear(); 6802 Map.erase(BTCPos); 6803 } 6804 }; 6805 6806 SmallVector<const Loop *, 16> LoopWorklist(1, L); 6807 SmallVector<Instruction *, 32> Worklist; 6808 SmallPtrSet<Instruction *, 16> Visited; 6809 6810 // Iterate over all the loops and sub-loops to drop SCEV information. 6811 while (!LoopWorklist.empty()) { 6812 auto *CurrL = LoopWorklist.pop_back_val(); 6813 6814 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 6815 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 6816 6817 // Drop information about predicated SCEV rewrites for this loop. 6818 for (auto I = PredicatedSCEVRewrites.begin(); 6819 I != PredicatedSCEVRewrites.end();) { 6820 std::pair<const SCEV *, const Loop *> Entry = I->first; 6821 if (Entry.second == CurrL) 6822 PredicatedSCEVRewrites.erase(I++); 6823 else 6824 ++I; 6825 } 6826 6827 auto LoopUsersItr = LoopUsers.find(CurrL); 6828 if (LoopUsersItr != LoopUsers.end()) { 6829 for (auto *S : LoopUsersItr->second) 6830 forgetMemoizedResults(S); 6831 LoopUsers.erase(LoopUsersItr); 6832 } 6833 6834 // Drop information about expressions based on loop-header PHIs. 6835 PushLoopPHIs(CurrL, Worklist); 6836 6837 while (!Worklist.empty()) { 6838 Instruction *I = Worklist.pop_back_val(); 6839 if (!Visited.insert(I).second) 6840 continue; 6841 6842 ValueExprMapType::iterator It = 6843 ValueExprMap.find_as(static_cast<Value *>(I)); 6844 if (It != ValueExprMap.end()) { 6845 eraseValueFromMap(It->first); 6846 forgetMemoizedResults(It->second); 6847 if (PHINode *PN = dyn_cast<PHINode>(I)) 6848 ConstantEvolutionLoopExitValue.erase(PN); 6849 } 6850 6851 PushDefUseChildren(I, Worklist); 6852 } 6853 6854 LoopPropertiesCache.erase(CurrL); 6855 // Forget all contained loops too, to avoid dangling entries in the 6856 // ValuesAtScopes map. 6857 LoopWorklist.append(CurrL->begin(), CurrL->end()); 6858 } 6859 } 6860 6861 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 6862 while (Loop *Parent = L->getParentLoop()) 6863 L = Parent; 6864 forgetLoop(L); 6865 } 6866 6867 void ScalarEvolution::forgetValue(Value *V) { 6868 Instruction *I = dyn_cast<Instruction>(V); 6869 if (!I) return; 6870 6871 // Drop information about expressions based on loop-header PHIs. 
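  // (Hedged usage sketch: a transform that rewrites the instruction defining
  // a value in place would call forgetValue on it first, so that the stale
  // SCEVs cached for the value and its transitive users are dropped here.)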
6872 SmallVector<Instruction *, 16> Worklist; 6873 Worklist.push_back(I); 6874 6875 SmallPtrSet<Instruction *, 8> Visited; 6876 while (!Worklist.empty()) { 6877 I = Worklist.pop_back_val(); 6878 if (!Visited.insert(I).second) 6879 continue; 6880 6881 ValueExprMapType::iterator It = 6882 ValueExprMap.find_as(static_cast<Value *>(I)); 6883 if (It != ValueExprMap.end()) { 6884 eraseValueFromMap(It->first); 6885 forgetMemoizedResults(It->second); 6886 if (PHINode *PN = dyn_cast<PHINode>(I)) 6887 ConstantEvolutionLoopExitValue.erase(PN); 6888 } 6889 6890 PushDefUseChildren(I, Worklist); 6891 } 6892 } 6893 6894 /// Get the exact loop backedge taken count considering all loop exits. A 6895 /// computable result can only be returned for loops with all exiting blocks 6896 /// dominating the latch. howFarToZero assumes that the limit of each loop test 6897 /// is never skipped. This is a valid assumption as long as the loop exits via 6898 /// that test. For precise results, it is the caller's responsibility to specify 6899 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 6900 const SCEV * 6901 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 6902 SCEVUnionPredicate *Preds) const { 6903 // If any exits were not computable, the loop is not computable. 6904 if (!isComplete() || ExitNotTaken.empty()) 6905 return SE->getCouldNotCompute(); 6906 6907 const BasicBlock *Latch = L->getLoopLatch(); 6908 // All exiting blocks we have collected must dominate the only backedge. 6909 if (!Latch) 6910 return SE->getCouldNotCompute(); 6911 6912 // All exiting blocks we have gathered dominate loop's latch, so exact trip 6913 // count is simply a minimum out of all these calculated exit counts. 6914 SmallVector<const SCEV *, 2> Ops; 6915 for (auto &ENT : ExitNotTaken) { 6916 const SCEV *BECount = ENT.ExactNotTaken; 6917 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 6918 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 6919 "We should only have known counts for exiting blocks that dominate " 6920 "latch!"); 6921 6922 Ops.push_back(BECount); 6923 6924 if (Preds && !ENT.hasAlwaysTruePredicate()) 6925 Preds->add(ENT.Predicate.get()); 6926 6927 assert((Preds || ENT.hasAlwaysTruePredicate()) && 6928 "Predicate should be always true!"); 6929 } 6930 6931 return SE->getUMinFromMismatchedTypes(Ops); 6932 } 6933 6934 /// Get the exact not taken count for this loop exit. 6935 const SCEV * 6936 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock, 6937 ScalarEvolution *SE) const { 6938 for (auto &ENT : ExitNotTaken) 6939 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6940 return ENT.ExactNotTaken; 6941 6942 return SE->getCouldNotCompute(); 6943 } 6944 6945 const SCEV * 6946 ScalarEvolution::BackedgeTakenInfo::getMax(BasicBlock *ExitingBlock, 6947 ScalarEvolution *SE) const { 6948 for (auto &ENT : ExitNotTaken) 6949 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 6950 return ENT.MaxNotTaken; 6951 6952 return SE->getCouldNotCompute(); 6953 } 6954 6955 /// getMax - Get the max backedge taken count for the loop. 
6956 const SCEV * 6957 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const { 6958 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6959 return !ENT.hasAlwaysTruePredicate(); 6960 }; 6961 6962 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax()) 6963 return SE->getCouldNotCompute(); 6964 6965 assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) && 6966 "No point in having a non-constant max backedge taken count!"); 6967 return getMax(); 6968 } 6969 6970 bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const { 6971 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 6972 return !ENT.hasAlwaysTruePredicate(); 6973 }; 6974 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 6975 } 6976 6977 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 6978 ScalarEvolution *SE) const { 6979 if (getMax() && getMax() != SE->getCouldNotCompute() && 6980 SE->hasOperand(getMax(), S)) 6981 return true; 6982 6983 for (auto &ENT : ExitNotTaken) 6984 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 6985 SE->hasOperand(ENT.ExactNotTaken, S)) 6986 return true; 6987 6988 return false; 6989 } 6990 6991 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 6992 : ExactNotTaken(E), MaxNotTaken(E) { 6993 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 6994 isa<SCEVConstant>(MaxNotTaken)) && 6995 "No point in having a non-constant max backedge taken count!"); 6996 } 6997 6998 ScalarEvolution::ExitLimit::ExitLimit( 6999 const SCEV *E, const SCEV *M, bool MaxOrZero, 7000 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7001 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7002 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7003 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7004 "Exact is not allowed to be less precise than Max"); 7005 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7006 isa<SCEVConstant>(MaxNotTaken)) && 7007 "No point in having a non-constant max backedge taken count!"); 7008 for (auto *PredSet : PredSetList) 7009 for (auto *P : *PredSet) 7010 addPredicate(P); 7011 } 7012 7013 ScalarEvolution::ExitLimit::ExitLimit( 7014 const SCEV *E, const SCEV *M, bool MaxOrZero, 7015 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7016 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7017 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7018 isa<SCEVConstant>(MaxNotTaken)) && 7019 "No point in having a non-constant max backedge taken count!"); 7020 } 7021 7022 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7023 bool MaxOrZero) 7024 : ExitLimit(E, M, MaxOrZero, None) { 7025 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7026 isa<SCEVConstant>(MaxNotTaken)) && 7027 "No point in having a non-constant max backedge taken count!"); 7028 } 7029 7030 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7031 /// computable exit into a persistent ExitNotTakenInfo array. 
7032 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7033     ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
7034         ExitCounts,
7035     bool Complete, const SCEV *MaxCount, bool MaxOrZero)
7036     : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
7037   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7038
7039   ExitNotTaken.reserve(ExitCounts.size());
7040   std::transform(
7041       ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7042       [&](const EdgeExitInfo &EEI) {
7043         BasicBlock *ExitBB = EEI.first;
7044         const ExitLimit &EL = EEI.second;
7045         if (EL.Predicates.empty())
7046           return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7047                                   nullptr);
7048
7049         std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7050         for (auto *Pred : EL.Predicates)
7051           Predicate->add(Pred);
7052
7053         return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7054                                 std::move(Predicate));
7055       });
7056   assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
7057          "No point in having a non-constant max backedge taken count!");
7058 }
7059
7060 /// Invalidate this result and free the ExitNotTakenInfo array.
7061 void ScalarEvolution::BackedgeTakenInfo::clear() {
7062   ExitNotTaken.clear();
7063 }
7064
7065 /// Compute the number of times the backedge of the specified loop will execute.
7066 ScalarEvolution::BackedgeTakenInfo
7067 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7068                                            bool AllowPredicates) {
7069   SmallVector<BasicBlock *, 8> ExitingBlocks;
7070   L->getExitingBlocks(ExitingBlocks);
7071
7072   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7073
7074   SmallVector<EdgeExitInfo, 4> ExitCounts;
7075   bool CouldComputeBECount = true;
7076   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7077   const SCEV *MustExitMaxBECount = nullptr;
7078   const SCEV *MayExitMaxBECount = nullptr;
7079   bool MustExitMaxOrZero = false;
7080
7081   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7082   // and compute maxBECount.
7083   // Do a union of all the predicates here.
7084   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7085     BasicBlock *ExitBB = ExitingBlocks[i];
7086
7087     // We canonicalize untaken exits to br (constant); ignore them so that
7088     // proving an exit untaken doesn't negatively impact our ability to reason
7089     // about the loop as a whole.
7090     if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7091       if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7092         bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7093         if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7094           continue;
7095       }
7096
7097     ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7098
7099     assert((AllowPredicates || EL.Predicates.empty()) &&
7100            "Predicated exit limit when predicates are not allowed!");
7101
7102     // 1. For each exit that can be computed, add an entry to ExitCounts.
7103     // CouldComputeBECount is true only if all exits can be computed.
7104     if (EL.ExactNotTaken == getCouldNotCompute())
7105       // We couldn't compute an exact value for this exit, so
7106       // we won't be able to compute an exact value for the loop.
7107       CouldComputeBECount = false;
7108     else
7109       ExitCounts.emplace_back(ExitBB, EL);
7110
7111     // 2. Derive the loop's MaxBECount from each exit's max number of
7112     // non-exiting iterations. Partition the loop exits into two kinds:
7113     // LoopMustExits and LoopMayExits.
7114     //
7115     // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
7116     // is a LoopMayExit. If any computable LoopMustExit is found, then
7117     // MaxBECount is the minimum EL.MaxNotTaken of computable
7118     // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7119     // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7120     // computable EL.MaxNotTaken.
7121     if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7122         DT.dominates(ExitBB, Latch)) {
7123       if (!MustExitMaxBECount) {
7124         MustExitMaxBECount = EL.MaxNotTaken;
7125         MustExitMaxOrZero = EL.MaxOrZero;
7126       } else {
7127         MustExitMaxBECount =
7128             getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7129       }
7130     } else if (MayExitMaxBECount != getCouldNotCompute()) {
7131       if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7132         MayExitMaxBECount = EL.MaxNotTaken;
7133       else {
7134         MayExitMaxBECount =
7135             getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7136       }
7137     }
7138   }
7139   const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7140     (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7141   // The loop backedge will be taken the maximum or zero times if there's
7142   // a single exit that must be taken the maximum or zero times.
7143   bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7144   return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7145                            MaxBECount, MaxOrZero);
7146 }
7147
7148 ScalarEvolution::ExitLimit
7149 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7150                                   bool AllowPredicates) {
7151   assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7152   // If our exiting block does not dominate the latch, then its connection with
7153   // the loop's exit limit may be far from trivial.
7154   const BasicBlock *Latch = L->getLoopLatch();
7155   if (!Latch || !DT.dominates(ExitingBlock, Latch))
7156     return getCouldNotCompute();
7157
7158   bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7159   Instruction *Term = ExitingBlock->getTerminator();
7160   if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7161     assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7162     bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7163     assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7164            "It should have one successor in loop and one exit block!");
7165     // Proceed to the next level to examine the exit condition expression.
7166     return computeExitLimitFromCond(
7167         L, BI->getCondition(), ExitIfTrue,
7168         /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7169   }
7170
7171   if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7172     // For switch, make sure that there is a single exit from the loop.
7173     BasicBlock *Exit = nullptr;
7174     for (auto *SBB : successors(ExitingBlock))
7175       if (!L->contains(SBB)) {
7176         if (Exit) // Multiple exit successors.
7177 return getCouldNotCompute(); 7178 Exit = SBB; 7179 } 7180 assert(Exit && "Exiting block must have at least one exit"); 7181 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7182 /*ControlsExit=*/IsOnlyExit); 7183 } 7184 7185 return getCouldNotCompute(); 7186 } 7187 7188 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7189 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7190 bool ControlsExit, bool AllowPredicates) { 7191 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7192 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7193 ControlsExit, AllowPredicates); 7194 } 7195 7196 Optional<ScalarEvolution::ExitLimit> 7197 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7198 bool ExitIfTrue, bool ControlsExit, 7199 bool AllowPredicates) { 7200 (void)this->L; 7201 (void)this->ExitIfTrue; 7202 (void)this->AllowPredicates; 7203 7204 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7205 this->AllowPredicates == AllowPredicates && 7206 "Variance in assumed invariant key components!"); 7207 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7208 if (Itr == TripCountMap.end()) 7209 return None; 7210 return Itr->second; 7211 } 7212 7213 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7214 bool ExitIfTrue, 7215 bool ControlsExit, 7216 bool AllowPredicates, 7217 const ExitLimit &EL) { 7218 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7219 this->AllowPredicates == AllowPredicates && 7220 "Variance in assumed invariant key components!"); 7221 7222 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7223 assert(InsertResult.second && "Expected successful insertion!"); 7224 (void)InsertResult; 7225 (void)ExitIfTrue; 7226 } 7227 7228 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7229 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7230 bool ControlsExit, bool AllowPredicates) { 7231 7232 if (auto MaybeEL = 7233 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7234 return *MaybeEL; 7235 7236 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7237 ControlsExit, AllowPredicates); 7238 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7239 return EL; 7240 } 7241 7242 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7243 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7244 bool ControlsExit, bool AllowPredicates) { 7245 // Check if the controlling expression for this loop is an And or Or. 7246 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 7247 if (BO->getOpcode() == Instruction::And) { 7248 // Recurse on the operands of the and. 7249 bool EitherMayExit = !ExitIfTrue; 7250 ExitLimit EL0 = computeExitLimitFromCondCached( 7251 Cache, L, BO->getOperand(0), ExitIfTrue, 7252 ControlsExit && !EitherMayExit, AllowPredicates); 7253 ExitLimit EL1 = computeExitLimitFromCondCached( 7254 Cache, L, BO->getOperand(1), ExitIfTrue, 7255 ControlsExit && !EitherMayExit, AllowPredicates); 7256 // Be robust against unsimplified IR for the form "and i1 X, true" 7257 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7258 return CI->isOne() ? EL0 : EL1; 7259 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7260 return CI->isOne() ? 
EL1 : EL0; 7261 const SCEV *BECount = getCouldNotCompute(); 7262 const SCEV *MaxBECount = getCouldNotCompute(); 7263 if (EitherMayExit) { 7264 // Both conditions must be true for the loop to continue executing. 7265 // Choose the less conservative count. 7266 if (EL0.ExactNotTaken == getCouldNotCompute() || 7267 EL1.ExactNotTaken == getCouldNotCompute()) 7268 BECount = getCouldNotCompute(); 7269 else 7270 BECount = 7271 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7272 if (EL0.MaxNotTaken == getCouldNotCompute()) 7273 MaxBECount = EL1.MaxNotTaken; 7274 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7275 MaxBECount = EL0.MaxNotTaken; 7276 else 7277 MaxBECount = 7278 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7279 } else { 7280 // Both conditions must be true at the same time for the loop to exit. 7281 // For now, be conservative. 7282 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7283 MaxBECount = EL0.MaxNotTaken; 7284 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7285 BECount = EL0.ExactNotTaken; 7286 } 7287 7288 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 7289 // to be more aggressive when computing BECount than when computing 7290 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7291 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7292 // to not. 7293 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7294 !isa<SCEVCouldNotCompute>(BECount)) 7295 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7296 7297 return ExitLimit(BECount, MaxBECount, false, 7298 {&EL0.Predicates, &EL1.Predicates}); 7299 } 7300 if (BO->getOpcode() == Instruction::Or) { 7301 // Recurse on the operands of the or. 7302 bool EitherMayExit = ExitIfTrue; 7303 ExitLimit EL0 = computeExitLimitFromCondCached( 7304 Cache, L, BO->getOperand(0), ExitIfTrue, 7305 ControlsExit && !EitherMayExit, AllowPredicates); 7306 ExitLimit EL1 = computeExitLimitFromCondCached( 7307 Cache, L, BO->getOperand(1), ExitIfTrue, 7308 ControlsExit && !EitherMayExit, AllowPredicates); 7309 // Be robust against unsimplified IR for the form "or i1 X, true" 7310 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) 7311 return CI->isZero() ? EL0 : EL1; 7312 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(0))) 7313 return CI->isZero() ? EL1 : EL0; 7314 const SCEV *BECount = getCouldNotCompute(); 7315 const SCEV *MaxBECount = getCouldNotCompute(); 7316 if (EitherMayExit) { 7317 // Both conditions must be false for the loop to continue executing. 7318 // Choose the less conservative count. 7319 if (EL0.ExactNotTaken == getCouldNotCompute() || 7320 EL1.ExactNotTaken == getCouldNotCompute()) 7321 BECount = getCouldNotCompute(); 7322 else 7323 BECount = 7324 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken); 7325 if (EL0.MaxNotTaken == getCouldNotCompute()) 7326 MaxBECount = EL1.MaxNotTaken; 7327 else if (EL1.MaxNotTaken == getCouldNotCompute()) 7328 MaxBECount = EL0.MaxNotTaken; 7329 else 7330 MaxBECount = 7331 getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 7332 } else { 7333 // Both conditions must be false at the same time for the loop to exit. 7334 // For now, be conservative. 7335 if (EL0.MaxNotTaken == EL1.MaxNotTaken) 7336 MaxBECount = EL0.MaxNotTaken; 7337 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 7338 BECount = EL0.ExactNotTaken; 7339 } 7340 // There are cases (e.g. 
PR26207) where computeExitLimitFromCond is able 7341 // to be more aggressive when computing BECount than when computing 7342 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 7343 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 7344 // to not. 7345 if (isa<SCEVCouldNotCompute>(MaxBECount) && 7346 !isa<SCEVCouldNotCompute>(BECount)) 7347 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 7348 7349 return ExitLimit(BECount, MaxBECount, false, 7350 {&EL0.Predicates, &EL1.Predicates}); 7351 } 7352 } 7353 7354 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7355 // Proceed to the next level to examine the icmp. 7356 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7357 ExitLimit EL = 7358 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7359 if (EL.hasFullInfo() || !AllowPredicates) 7360 return EL; 7361 7362 // Try again, but use SCEV predicates this time. 7363 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7364 /*AllowPredicates=*/true); 7365 } 7366 7367 // Check for a constant condition. These are normally stripped out by 7368 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 7369 // preserve the CFG and is temporarily leaving constant conditions 7370 // in place. 7371 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 7372 if (ExitIfTrue == !CI->getZExtValue()) 7373 // The backedge is always taken. 7374 return getCouldNotCompute(); 7375 else 7376 // The backedge is never taken. 7377 return getZero(CI->getType()); 7378 } 7379 7380 // If it's not an integer or pointer comparison then compute it the hard way. 7381 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7382 } 7383 7384 ScalarEvolution::ExitLimit 7385 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 7386 ICmpInst *ExitCond, 7387 bool ExitIfTrue, 7388 bool ControlsExit, 7389 bool AllowPredicates) { 7390 // If the condition was exit on true, convert the condition to exit on false 7391 ICmpInst::Predicate Pred; 7392 if (!ExitIfTrue) 7393 Pred = ExitCond->getPredicate(); 7394 else 7395 Pred = ExitCond->getInversePredicate(); 7396 const ICmpInst::Predicate OriginalPred = Pred; 7397 7398 // Handle common loops like: for (X = "string"; *X; ++X) 7399 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 7400 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 7401 ExitLimit ItCnt = 7402 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred); 7403 if (ItCnt.hasAnyInfo()) 7404 return ItCnt; 7405 } 7406 7407 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 7408 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 7409 7410 // Try to evaluate any dependencies out of the loop. 7411 LHS = getSCEVAtScope(LHS, L); 7412 RHS = getSCEVAtScope(RHS, L); 7413 7414 // At this point, we would like to compute how many iterations of the 7415 // loop the predicate will return true for these inputs. 7416 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 7417 // If there is a loop-invariant, force it into the RHS. 7418 std::swap(LHS, RHS); 7419 Pred = ICmpInst::getSwappedPredicate(Pred); 7420 } 7421 7422 // Simplify the operands before analyzing them. 7423 (void)SimplifyICmpOperands(Pred, LHS, RHS); 7424 7425 // If we have a comparison of a chrec against a constant, try to use value 7426 // ranges to answer this query. 
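  // (Illustrative sketch: for a condition on {0,+,1}<%L> `slt` 10,
  // makeExactICmpRegion yields the signed range [INT_MIN, 10), and
  // getNumIterationsInRange then computes the first iteration at which the
  // recurrence's value leaves that range; hypothetical values only.)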
7427 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 7428 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 7429 if (AddRec->getLoop() == L) { 7430 // Form the constant range. 7431 ConstantRange CompRange = 7432 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7433 7434 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7435 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7436 } 7437 7438 switch (Pred) { 7439 case ICmpInst::ICMP_NE: { // while (X != Y) 7440 // Convert to: while (X-Y != 0) 7441 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7442 AllowPredicates); 7443 if (EL.hasAnyInfo()) return EL; 7444 break; 7445 } 7446 case ICmpInst::ICMP_EQ: { // while (X == Y) 7447 // Convert to: while (X-Y == 0) 7448 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7449 if (EL.hasAnyInfo()) return EL; 7450 break; 7451 } 7452 case ICmpInst::ICMP_SLT: 7453 case ICmpInst::ICMP_ULT: { // while (X < Y) 7454 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7455 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7456 AllowPredicates); 7457 if (EL.hasAnyInfo()) return EL; 7458 break; 7459 } 7460 case ICmpInst::ICMP_SGT: 7461 case ICmpInst::ICMP_UGT: { // while (X > Y) 7462 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7463 ExitLimit EL = 7464 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7465 AllowPredicates); 7466 if (EL.hasAnyInfo()) return EL; 7467 break; 7468 } 7469 default: 7470 break; 7471 } 7472 7473 auto *ExhaustiveCount = 7474 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7475 7476 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7477 return ExhaustiveCount; 7478 7479 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7480 ExitCond->getOperand(1), L, OriginalPred); 7481 } 7482 7483 ScalarEvolution::ExitLimit 7484 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7485 SwitchInst *Switch, 7486 BasicBlock *ExitingBlock, 7487 bool ControlsExit) { 7488 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7489 7490 // Give up if the exit is the default dest of a switch. 7491 if (Switch->getDefaultDest() == ExitingBlock) 7492 return getCouldNotCompute(); 7493 7494 assert(L->contains(Switch->getDefaultDest()) && 7495 "Default case must not exit the loop!"); 7496 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7497 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7498 7499 // while (X != Y) --> while (X-Y != 0) 7500 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7501 if (EL.hasAnyInfo()) 7502 return EL; 7503 7504 return getCouldNotCompute(); 7505 } 7506 7507 static ConstantInt * 7508 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7509 ScalarEvolution &SE) { 7510 const SCEV *InVal = SE.getConstant(C); 7511 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7512 assert(isa<SCEVConstant>(Val) && 7513 "Evaluation of SCEV at constant didn't fold correctly?"); 7514 return cast<SCEVConstant>(Val)->getValue(); 7515 } 7516 7517 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7518 /// compute the backedge execution count. 7519 ScalarEvolution::ExitLimit 7520 ScalarEvolution::computeLoadConstantCompareExitLimit( 7521 LoadInst *LI, 7522 Constant *RHS, 7523 const Loop *L, 7524 ICmpInst::Predicate predicate) { 7525 if (LI->isVolatile()) return getCouldNotCompute(); 7526 7527 // Check to see if the loaded pointer is a getelementptr of a global. 
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", matched either as %iv itself or as
  // %iv.shifted, in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match.  Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so.  Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value.  We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations.  If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and
    // to -1 if K is negative, in at most bitwidth(K) iterations.
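    //
    // A numeric sketch of the stabilization (example values only):
    // {-20,ashr,2} produces -20, -5, -2, -1, -1, ... while {24,lshr,1}
    // produces 24, 12, 6, 3, 1, 0, 0, ...; in both cases the value is
    // stable after at most bitwidth(K) steps.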
7702 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 7703 KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr, 7704 Predecessor->getTerminator(), &DT); 7705 auto *Ty = cast<IntegerType>(RHS->getType()); 7706 if (Known.isNonNegative()) 7707 StableValue = ConstantInt::get(Ty, 0); 7708 else if (Known.isNegative()) 7709 StableValue = ConstantInt::get(Ty, -1, true); 7710 else 7711 return getCouldNotCompute(); 7712 7713 break; 7714 } 7715 case Instruction::LShr: 7716 case Instruction::Shl: 7717 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 7718 // stabilize to 0 in at most bitwidth(K) iterations. 7719 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 7720 break; 7721 } 7722 7723 auto *Result = 7724 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 7725 assert(Result->getType()->isIntegerTy(1) && 7726 "Otherwise cannot be an operand to a branch instruction"); 7727 7728 if (Result->isZeroValue()) { 7729 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 7730 const SCEV *UpperBound = 7731 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 7732 return ExitLimit(getCouldNotCompute(), UpperBound, false); 7733 } 7734 7735 return getCouldNotCompute(); 7736 } 7737 7738 /// Return true if we can constant fold an instruction of the specified type, 7739 /// assuming that all operands were constants. 7740 static bool CanConstantFold(const Instruction *I) { 7741 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 7742 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7743 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 7744 return true; 7745 7746 if (const CallInst *CI = dyn_cast<CallInst>(I)) 7747 if (const Function *F = CI->getCalledFunction()) 7748 return canConstantFoldCallTo(CI, F); 7749 return false; 7750 } 7751 7752 /// Determine whether this instruction can constant evolve within this loop 7753 /// assuming its operands can all constant evolve. 7754 static bool canConstantEvolve(Instruction *I, const Loop *L) { 7755 // An instruction outside of the loop can't be derived from a loop PHI. 7756 if (!L->contains(I)) return false; 7757 7758 if (isa<PHINode>(I)) { 7759 // We don't currently keep track of the control flow needed to evaluate 7760 // PHIs, so we cannot handle PHIs inside of loops. 7761 return L->getHeader() == I->getParent(); 7762 } 7763 7764 // If we won't be able to constant fold this expression even if the operands 7765 // are constants, bail early. 7766 return CanConstantFold(I); 7767 } 7768 7769 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 7770 /// recursing through each instruction operand until reaching a loop header phi. 7771 static PHINode * 7772 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 7773 DenseMap<Instruction *, PHINode *> &PHIMap, 7774 unsigned Depth) { 7775 if (Depth > MaxConstantEvolvingDepth) 7776 return nullptr; 7777 7778 // Otherwise, we can evaluate this instruction if all of its operands are 7779 // constant or derived from a PHI node themselves. 7780 PHINode *PHI = nullptr; 7781 for (Value *Op : UseInst->operands()) { 7782 if (isa<Constant>(Op)) continue; 7783 7784 Instruction *OpInst = dyn_cast<Instruction>(Op); 7785 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 7786 7787 PHINode *P = dyn_cast<PHINode>(OpInst); 7788 if (!P) 7789 // If this operand is already visited, reuse the prior result. 
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr;  // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along
/// the way, but the operands of an operation must be either constants or
/// values derived from a constant PHI.  If this expression does not fit with
/// these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the loop
/// PHI nodes have the constant values given in the Vals map.  If we can't
/// fold this expression for some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
7848 if (isa<PHINode>(I)) return nullptr; 7849 7850 std::vector<Constant*> Operands(I->getNumOperands()); 7851 7852 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 7853 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 7854 if (!Operand) { 7855 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 7856 if (!Operands[i]) return nullptr; 7857 continue; 7858 } 7859 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 7860 Vals[Operand] = C; 7861 if (!C) return nullptr; 7862 Operands[i] = C; 7863 } 7864 7865 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 7866 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 7867 Operands[1], DL, TLI); 7868 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 7869 if (!LI->isVolatile()) 7870 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 7871 } 7872 return ConstantFoldInstOperands(I, Operands, DL, TLI); 7873 } 7874 7875 7876 // If every incoming value to PN except the one for BB is a specific Constant, 7877 // return that, else return nullptr. 7878 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 7879 Constant *IncomingVal = nullptr; 7880 7881 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 7882 if (PN->getIncomingBlock(i) == BB) 7883 continue; 7884 7885 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 7886 if (!CurrentVal) 7887 return nullptr; 7888 7889 if (IncomingVal != CurrentVal) { 7890 if (IncomingVal) 7891 return nullptr; 7892 IncomingVal = CurrentVal; 7893 } 7894 } 7895 7896 return IncomingVal; 7897 } 7898 7899 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 7900 /// in the header of its containing loop, we know the loop executes a 7901 /// constant number of times, and the PHI node is just a recurrence 7902 /// involving constants, fold it. 7903 Constant * 7904 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 7905 const APInt &BEs, 7906 const Loop *L) { 7907 auto I = ConstantEvolutionLoopExitValue.find(PN); 7908 if (I != ConstantEvolutionLoopExitValue.end()) 7909 return I->second; 7910 7911 if (BEs.ugt(MaxBruteForceIterations)) 7912 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 7913 7914 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 7915 7916 DenseMap<Instruction *, Constant *> CurrentIterVals; 7917 BasicBlock *Header = L->getHeader(); 7918 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7919 7920 BasicBlock *Latch = L->getLoopLatch(); 7921 if (!Latch) 7922 return nullptr; 7923 7924 for (PHINode &PHI : Header->phis()) { 7925 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 7926 CurrentIterVals[&PHI] = StartCST; 7927 } 7928 if (!CurrentIterVals.count(PN)) 7929 return RetVal = nullptr; 7930 7931 Value *BEValue = PN->getIncomingValueForBlock(Latch); 7932 7933 // Execute the loop symbolically to determine the exit value. 7934 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 7935 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 7936 7937 unsigned NumIterations = BEs.getZExtValue(); // must be in range 7938 unsigned IterationNum = 0; 7939 const DataLayout &DL = getDataLayout(); 7940 for (; ; ++IterationNum) { 7941 if (IterationNum == NumIterations) 7942 return RetVal = CurrentIterVals[PN]; // Got exit value! 7943 7944 // Compute the value of the PHIs for the next iteration. 7945 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 
7946 DenseMap<Instruction *, Constant *> NextIterVals; 7947 Constant *NextPHI = 7948 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7949 if (!NextPHI) 7950 return nullptr; // Couldn't evaluate! 7951 NextIterVals[PN] = NextPHI; 7952 7953 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 7954 7955 // Also evaluate the other PHI nodes. However, we don't get to stop if we 7956 // cease to be able to evaluate one of them or if they stop evolving, 7957 // because that doesn't necessarily prevent us from computing PN. 7958 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 7959 for (const auto &I : CurrentIterVals) { 7960 PHINode *PHI = dyn_cast<PHINode>(I.first); 7961 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 7962 PHIsToCompute.emplace_back(PHI, I.second); 7963 } 7964 // We use two distinct loops because EvaluateExpression may invalidate any 7965 // iterators into CurrentIterVals. 7966 for (const auto &I : PHIsToCompute) { 7967 PHINode *PHI = I.first; 7968 Constant *&NextPHI = NextIterVals[PHI]; 7969 if (!NextPHI) { // Not already computed. 7970 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 7971 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 7972 } 7973 if (NextPHI != I.second) 7974 StoppedEvolving = false; 7975 } 7976 7977 // If all entries in CurrentIterVals == NextIterVals then we can stop 7978 // iterating, the loop can't continue to change. 7979 if (StoppedEvolving) 7980 return RetVal = CurrentIterVals[PN]; 7981 7982 CurrentIterVals.swap(NextIterVals); 7983 } 7984 } 7985 7986 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 7987 Value *Cond, 7988 bool ExitWhen) { 7989 PHINode *PN = getConstantEvolvingPHI(Cond, L); 7990 if (!PN) return getCouldNotCompute(); 7991 7992 // If the loop is canonicalized, the PHI will have exactly two entries. 7993 // That's the only form we support here. 7994 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 7995 7996 DenseMap<Instruction *, Constant *> CurrentIterVals; 7997 BasicBlock *Header = L->getHeader(); 7998 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 7999 8000 BasicBlock *Latch = L->getLoopLatch(); 8001 assert(Latch && "Should follow from NumIncomingValues == 2!"); 8002 8003 for (PHINode &PHI : Header->phis()) { 8004 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8005 CurrentIterVals[&PHI] = StartCST; 8006 } 8007 if (!CurrentIterVals.count(PN)) 8008 return getCouldNotCompute(); 8009 8010 // Okay, we find a PHI node that defines the trip count of this loop. Execute 8011 // the loop symbolically to determine when the condition gets a value of 8012 // "ExitWhen". 8013 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 8014 const DataLayout &DL = getDataLayout(); 8015 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 8016 auto *CondVal = dyn_cast_or_null<ConstantInt>( 8017 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 8018 8019 // Couldn't symbolically evaluate. 8020 if (!CondVal) return getCouldNotCompute(); 8021 8022 if (CondVal->getValue() == uint64_t(ExitWhen)) { 8023 ++NumBruteForceTripCountsComputed; 8024 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 8025 } 8026 8027 // Update all the PHI nodes for the next iteration. 8028 DenseMap<Instruction *, Constant *> NextIterVals; 8029 8030 // Create a list of which PHIs we need to compute. 
We want to do this before 8031 // calling EvaluateExpression on them because that may invalidate iterators 8032 // into CurrentIterVals. 8033 SmallVector<PHINode *, 8> PHIsToCompute; 8034 for (const auto &I : CurrentIterVals) { 8035 PHINode *PHI = dyn_cast<PHINode>(I.first); 8036 if (!PHI || PHI->getParent() != Header) continue; 8037 PHIsToCompute.push_back(PHI); 8038 } 8039 for (PHINode *PHI : PHIsToCompute) { 8040 Constant *&NextPHI = NextIterVals[PHI]; 8041 if (NextPHI) continue; // Already computed! 8042 8043 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8044 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8045 } 8046 CurrentIterVals.swap(NextIterVals); 8047 } 8048 8049 // Too many iterations were needed to evaluate. 8050 return getCouldNotCompute(); 8051 } 8052 8053 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8054 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8055 ValuesAtScopes[V]; 8056 // Check to see if we've folded this expression at this loop before. 8057 for (auto &LS : Values) 8058 if (LS.first == L) 8059 return LS.second ? LS.second : V; 8060 8061 Values.emplace_back(L, nullptr); 8062 8063 // Otherwise compute it. 8064 const SCEV *C = computeSCEVAtScope(V, L); 8065 for (auto &LS : reverse(ValuesAtScopes[V])) 8066 if (LS.first == L) { 8067 LS.second = C; 8068 break; 8069 } 8070 return C; 8071 } 8072 8073 /// This builds up a Constant using the ConstantExpr interface. That way, we 8074 /// will return Constants for objects which aren't represented by a 8075 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8076 /// Returns NULL if the SCEV isn't representable as a Constant. 8077 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8078 switch (static_cast<SCEVTypes>(V->getSCEVType())) { 8079 case scCouldNotCompute: 8080 case scAddRecExpr: 8081 break; 8082 case scConstant: 8083 return cast<SCEVConstant>(V)->getValue(); 8084 case scUnknown: 8085 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8086 case scSignExtend: { 8087 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8088 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8089 return ConstantExpr::getSExt(CastOp, SS->getType()); 8090 break; 8091 } 8092 case scZeroExtend: { 8093 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8094 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8095 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8096 break; 8097 } 8098 case scTruncate: { 8099 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8100 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8101 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8102 break; 8103 } 8104 case scAddExpr: { 8105 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8106 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8107 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8108 unsigned AS = PTy->getAddressSpace(); 8109 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8110 C = ConstantExpr::getBitCast(C, DestPtrTy); 8111 } 8112 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8113 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8114 if (!C2) return nullptr; 8115 8116 // First pointer! 
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes.  We can add bytes to an
          // i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers. We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    break;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy()) return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy()) return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    break;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    break;
  }
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
    break; // TODO: smax, umax, smin, umin.
  }
  return nullptr;
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *LI = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (LI && LI->getParentLoop() == L &&
            PN->getParent() == LI->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
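          //
          // For instance (a sketch): if the backedge-taken count below folds
          // to zero, the header runs exactly once and the PHI can only ever
          // hold its unique loop-entry value.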
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!LI->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {
            unsigned InLoopPred = LI->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            const SCEV *OnBackedge = getSCEV(PN->getIncomingValue(InLoopPred));
            if (IsAvailableOnEntry(LI, DT, OnBackedge, PN->getParent()))
              return OnBackedge;
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV =
                getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope.  If we
        // can prove that this replacement does not break LCSSA form, use the
        // new value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize this using
          // LI.replacementPreservesLCSSAForm; for the simplest case, just
          // support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and is neither an integer
          // nor a pointer, don't even try to analyze it with SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
8274 if (MadeImprovement) { 8275 Constant *C = nullptr; 8276 const DataLayout &DL = getDataLayout(); 8277 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 8278 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8279 Operands[1], DL, &TLI); 8280 else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { 8281 if (!LI->isVolatile()) 8282 C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8283 } else 8284 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 8285 if (!C) return V; 8286 return getSCEV(C); 8287 } 8288 } 8289 } 8290 8291 // This is some other type of SCEVUnknown, just return it. 8292 return V; 8293 } 8294 8295 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 8296 // Avoid performing the look-up in the common case where the specified 8297 // expression has no loop-variant portions. 8298 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 8299 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8300 if (OpAtScope != Comm->getOperand(i)) { 8301 // Okay, at least one of these operands is loop variant but might be 8302 // foldable. Build a new instance of the folded commutative expression. 8303 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8304 Comm->op_begin()+i); 8305 NewOps.push_back(OpAtScope); 8306 8307 for (++i; i != e; ++i) { 8308 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8309 NewOps.push_back(OpAtScope); 8310 } 8311 if (isa<SCEVAddExpr>(Comm)) 8312 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8313 if (isa<SCEVMulExpr>(Comm)) 8314 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8315 if (isa<SCEVMinMaxExpr>(Comm)) 8316 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8317 llvm_unreachable("Unknown commutative SCEV type!"); 8318 } 8319 } 8320 // If we got here, all operands are loop invariant. 8321 return Comm; 8322 } 8323 8324 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8325 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8326 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8327 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8328 return Div; // must be loop invariant 8329 return getUDivExpr(LHS, RHS); 8330 } 8331 8332 // If this is a loop recurrence for a loop that does not contain L, then we 8333 // are dealing with the final value computed by the loop. 8334 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8335 // First, attempt to evaluate each operand. 8336 // Avoid performing the look-up in the common case where the specified 8337 // expression has no loop-variant portions. 8338 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8339 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8340 if (OpAtScope == AddRec->getOperand(i)) 8341 continue; 8342 8343 // Okay, at least one of these operands is loop variant but might be 8344 // foldable. Build a new instance of the folded commutative expression. 8345 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8346 AddRec->op_begin()+i); 8347 NewOps.push_back(OpAtScope); 8348 for (++i; i != e; ++i) 8349 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8350 8351 const SCEV *FoldedRec = 8352 getAddRecExpr(NewOps, AddRec->getLoop(), 8353 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8354 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8355 // The addrec may be folded to a nonrecurrence, for example, if the 8356 // induction variable is multiplied by zero after constant folding. Go 8357 // ahead and return the folded value. 
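      //
      // For instance (a sketch, with a hypothetical %n): {0,+,%n} where %n
      // evaluates to 0 at this scope folds to the constant 0, which is no
      // longer a SCEVAddRecExpr.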
8358 if (!AddRec) 8359 return FoldedRec; 8360 break; 8361 } 8362 8363 // If the scope is outside the addrec's loop, evaluate it by using the 8364 // loop exit value of the addrec. 8365 if (!AddRec->getLoop()->contains(L)) { 8366 // To evaluate this recurrence, we need to know how many times the AddRec 8367 // loop iterates. Compute this now. 8368 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8369 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8370 8371 // Then, evaluate the AddRec. 8372 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8373 } 8374 8375 return AddRec; 8376 } 8377 8378 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8379 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8380 if (Op == Cast->getOperand()) 8381 return Cast; // must be loop invariant 8382 return getZeroExtendExpr(Op, Cast->getType()); 8383 } 8384 8385 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8386 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8387 if (Op == Cast->getOperand()) 8388 return Cast; // must be loop invariant 8389 return getSignExtendExpr(Op, Cast->getType()); 8390 } 8391 8392 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8393 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8394 if (Op == Cast->getOperand()) 8395 return Cast; // must be loop invariant 8396 return getTruncateExpr(Op, Cast->getType()); 8397 } 8398 8399 llvm_unreachable("Unknown SCEV type!"); 8400 } 8401 8402 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8403 return getSCEVAtScope(getSCEV(V), L); 8404 } 8405 8406 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8407 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8408 return stripInjectiveFunctions(ZExt->getOperand()); 8409 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8410 return stripInjectiveFunctions(SExt->getOperand()); 8411 return S; 8412 } 8413 8414 /// Finds the minimum unsigned root of the following equation: 8415 /// 8416 /// A * X = B (mod N) 8417 /// 8418 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8419 /// A and B isn't important. 8420 /// 8421 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8422 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8423 ScalarEvolution &SE) { 8424 uint32_t BW = A.getBitWidth(); 8425 assert(BW == SE.getTypeSizeInBits(B->getType())); 8426 assert(A != 0 && "A must be non-zero."); 8427 8428 // 1. D = gcd(A, N) 8429 // 8430 // The gcd of A and N may have only one prime factor: 2. The number of 8431 // trailing zeros in A is its multiplicity 8432 uint32_t Mult2 = A.countTrailingZeros(); 8433 // D = 2^Mult2 8434 8435 // 2. Check if B is divisible by D. 8436 // 8437 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8438 // is not less than multiplicity of this prime factor for D. 8439 if (SE.GetMinTrailingZeros(B) < Mult2) 8440 return SE.getCouldNotCompute(); 8441 8442 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8443 // modulo (N / D). 8444 // 8445 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8446 // (N / D) in general. The inverse itself always fits into BW bits, though, 8447 // so we immediately truncate it. 
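  //
  // A worked sketch with small numbers (example values only): take BW = 4,
  // so N = 16, and solve 4*X = 8 (mod 16).  Here Mult2 = 2 and D = 4; B = 8
  // has three trailing zeros, so it is divisible by D; A/D = 1, N/D = 4,
  // and the inverse I = 1.  The minimum root is then (1*8 mod 16)/4 = 2,
  // and indeed 4*2 = 8 (mod 16).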
8448 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8449 APInt Mod(BW + 1, 0); 8450 Mod.setBit(BW - Mult2); // Mod = N / D 8451 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8452 8453 // 4. Compute the minimum unsigned root of the equation: 8454 // I * (B / D) mod (N / D) 8455 // To simplify the computation, we factor out the divide by D: 8456 // (I * B mod N) / D 8457 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8458 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8459 } 8460 8461 /// For a given quadratic addrec, generate coefficients of the corresponding 8462 /// quadratic equation, multiplied by a common value to ensure that they are 8463 /// integers. 8464 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8465 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8466 /// were multiplied by, and BitWidth is the bit width of the original addrec 8467 /// coefficients. 8468 /// This function returns None if the addrec coefficients are not compile- 8469 /// time constants. 8470 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8471 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8472 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8473 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8474 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8475 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8476 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8477 << *AddRec << '\n'); 8478 8479 // We currently can only solve this if the coefficients are constants. 8480 if (!LC || !MC || !NC) { 8481 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8482 return None; 8483 } 8484 8485 APInt L = LC->getAPInt(); 8486 APInt M = MC->getAPInt(); 8487 APInt N = NC->getAPInt(); 8488 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8489 8490 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8491 unsigned NewWidth = BitWidth + 1; 8492 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8493 << BitWidth << '\n'); 8494 // The sign-extension (as opposed to a zero-extension) here matches the 8495 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8496 N = N.sext(NewWidth); 8497 M = M.sext(NewWidth); 8498 L = L.sext(NewWidth); 8499 8500 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8501 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8502 // L+M, L+2M+N, L+3M+3N, ... 8503 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8504 // 8505 // The equation Acc = 0 is then 8506 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8507 // In a quadratic form it becomes: 8508 // N n^2 + (2M-N) n + 2L = 0. 8509 8510 APInt A = N; 8511 APInt B = 2 * M - A; 8512 APInt C = 2 * L; 8513 APInt T = APInt(NewWidth, 2); 8514 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8515 << "x + " << C << ", coeff bw: " << NewWidth 8516 << ", multiplied by " << T << '\n'); 8517 return std::make_tuple(A, B, C, T, BitWidth); 8518 } 8519 8520 /// Helper function to compare optional APInts: 8521 /// (a) if X and Y both exist, return min(X, Y), 8522 /// (b) if neither X nor Y exist, return None, 8523 /// (c) if exactly one of X and Y exists, return that value. 
8524 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8525 if (X.hasValue() && Y.hasValue()) { 8526 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8527 APInt XW = X->sextOrSelf(W); 8528 APInt YW = Y->sextOrSelf(W); 8529 return XW.slt(YW) ? *X : *Y; 8530 } 8531 if (!X.hasValue() && !Y.hasValue()) 8532 return None; 8533 return X.hasValue() ? *X : *Y; 8534 } 8535 8536 /// Helper function to truncate an optional APInt to a given BitWidth. 8537 /// When solving addrec-related equations, it is preferable to return a value 8538 /// that has the same bit width as the original addrec's coefficients. If the 8539 /// solution fits in the original bit width, truncate it (except for i1). 8540 /// Returning a value of a different bit width may inhibit some optimizations. 8541 /// 8542 /// In general, a solution to a quadratic equation generated from an addrec 8543 /// may require BW+1 bits, where BW is the bit width of the addrec's 8544 /// coefficients. The reason is that the coefficients of the quadratic 8545 /// equation are BW+1 bits wide (to avoid truncation when converting from 8546 /// the addrec to the equation). 8547 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8548 if (!X.hasValue()) 8549 return None; 8550 unsigned W = X->getBitWidth(); 8551 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8552 return X->trunc(BitWidth); 8553 return X; 8554 } 8555 8556 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8557 /// iterations. The values L, M, N are assumed to be signed, and they 8558 /// should all have the same bit widths. 8559 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8560 /// where BW is the bit width of the addrec's coefficients. 8561 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8562 /// returned as such, otherwise the bit width of the returned value may 8563 /// be greater than BW. 8564 /// 8565 /// This function returns None if 8566 /// (a) the addrec coefficients are not constant, or 8567 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8568 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8569 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8570 static Optional<APInt> 8571 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8572 APInt A, B, C, M; 8573 unsigned BitWidth; 8574 auto T = GetQuadraticEquation(AddRec); 8575 if (!T.hasValue()) 8576 return None; 8577 8578 std::tie(A, B, C, M, BitWidth) = *T; 8579 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8580 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8581 if (!X.hasValue()) 8582 return None; 8583 8584 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8585 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8586 if (!V->isZero()) 8587 return None; 8588 8589 return TruncIfPossible(X, BitWidth); 8590 } 8591 8592 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8593 /// iterations. The values M, N are assumed to be signed, and they 8594 /// should all have the same bit widths. 8595 /// Find the least n such that c(n) does not belong to the given range, 8596 /// while c(n-1) does. 
8597 /// 8598 /// This function returns None if 8599 /// (a) the addrec coefficients are not constant, or 8600 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 8601 /// bounds of the range. 8602 static Optional<APInt> 8603 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 8604 const ConstantRange &Range, ScalarEvolution &SE) { 8605 assert(AddRec->getOperand(0)->isZero() && 8606 "Starting value of addrec should be 0"); 8607 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 8608 << Range << ", addrec " << *AddRec << '\n'); 8609 // This case is handled in getNumIterationsInRange. Here we can assume that 8610 // we start in the range. 8611 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 8612 "Addrec's initial value should be in range"); 8613 8614 APInt A, B, C, M; 8615 unsigned BitWidth; 8616 auto T = GetQuadraticEquation(AddRec); 8617 if (!T.hasValue()) 8618 return None; 8619 8620 // Be careful about the return value: there can be two reasons for not 8621 // returning an actual number. First, if no solutions to the equations 8622 // were found, and second, if the solutions don't leave the given range. 8623 // The first case means that the actual solution is "unknown", the second 8624 // means that it's known, but not valid. If the solution is unknown, we 8625 // cannot make any conclusions. 8626 // Return a pair: the optional solution and a flag indicating if the 8627 // solution was found. 8628 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 8629 // Solve for signed overflow and unsigned overflow, pick the lower 8630 // solution. 8631 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 8632 << Bound << " (before multiplying by " << M << ")\n"); 8633 Bound *= M; // The quadratic equation multiplier. 8634 8635 Optional<APInt> SO = None; 8636 if (BitWidth > 1) { 8637 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8638 "signed overflow\n"); 8639 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 8640 } 8641 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 8642 "unsigned overflow\n"); 8643 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 8644 BitWidth+1); 8645 8646 auto LeavesRange = [&] (const APInt &X) { 8647 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 8648 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 8649 if (Range.contains(V0->getValue())) 8650 return false; 8651 // X should be at least 1, so X-1 is non-negative. 8652 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 8653 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 8654 if (Range.contains(V1->getValue())) 8655 return true; 8656 return false; 8657 }; 8658 8659 // If SolveQuadraticEquationWrap returns None, it means that there can 8660 // be a solution, but the function failed to find it. We cannot treat it 8661 // as "no solution". 8662 if (!SO.hasValue() || !UO.hasValue()) 8663 return { None, false }; 8664 8665 // Check the smaller value first to see if it leaves the range. 8666 // At this point, both SO and UO must have values. 8667 Optional<APInt> Min = MinOptional(SO, UO); 8668 if (LeavesRange(*Min)) 8669 return { Min, true }; 8670 Optional<APInt> Max = Min == SO ? UO : SO; 8671 if (LeavesRange(*Max)) 8672 return { Max, true }; 8673 8674 // Solutions were found, but were eliminated, hence the "true". 
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // The lower bound is inclusive, so subtract 1 to represent the exiting
  // value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens.  Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not.  The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows.  This is only possible when the two
  // overflows cross k*2^W for the same k.  In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it.  Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B.  If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B.  It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow.  If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A.  But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test.  The exit
  // condition is now expressed as a single expression, V = x-y.  So the
  // exit test is effectively V != 0.  We know and take advantage of the
  // fact that this expression is only used in a comparison-with-zero
  // context.
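  //
  // A worked sketch (example values only): for a loop written as
  // "for (i = 0; i != 10; i += 2)" the exit expression is
  // V = {0,+,2} - 10 = {-10,+,2}, and the machinery below solves
  // -10 + 2*N == 0 (mod 2^BW), giving a backedge-taken count of N = 5.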
8738 8739 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 8740 // If the value is a constant 8741 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8742 // If the value is already zero, the branch will execute zero times. 8743 if (C->getValue()->isZero()) return C; 8744 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8745 } 8746 8747 const SCEVAddRecExpr *AddRec = 8748 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 8749 8750 if (!AddRec && AllowPredicates) 8751 // Try to make this an AddRec using runtime tests, in the first X 8752 // iterations of this loop, where X is the SCEV expression found by the 8753 // algorithm below. 8754 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 8755 8756 if (!AddRec || AddRec->getLoop() != L) 8757 return getCouldNotCompute(); 8758 8759 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 8760 // the quadratic equation to solve it. 8761 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 8762 // We can only use this value if the chrec ends up with an exact zero 8763 // value at this index. When solving for "X*X != 5", for example, we 8764 // should not accept a root of 2. 8765 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 8766 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 8767 return ExitLimit(R, R, false, Predicates); 8768 } 8769 return getCouldNotCompute(); 8770 } 8771 8772 // Otherwise we can only handle this if it is affine. 8773 if (!AddRec->isAffine()) 8774 return getCouldNotCompute(); 8775 8776 // If this is an affine expression, the execution count of this branch is 8777 // the minimum unsigned root of the following equation: 8778 // 8779 // Start + Step*N = 0 (mod 2^BW) 8780 // 8781 // equivalent to: 8782 // 8783 // Step*N = -Start (mod 2^BW) 8784 // 8785 // where BW is the common bit width of Start and Step. 8786 8787 // Get the initial value for the loop. 8788 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 8789 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 8790 8791 // For now we handle only constant steps. 8792 // 8793 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 8794 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 8795 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 8796 // We have not yet seen any such cases. 8797 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 8798 if (!StepC || StepC->getValue()->isZero()) 8799 return getCouldNotCompute(); 8800 8801 // For positive steps (counting up until unsigned overflow): 8802 // N = -Start/Step (as unsigned) 8803 // For negative steps (counting down to zero): 8804 // N = Start/-Step 8805 // First compute the unsigned distance from zero in the direction of Step. 8806 bool CountDown = StepC->getAPInt().isNegative(); 8807 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 8808 8809 // Handle unitary steps, which cannot wraparound. 8810 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 8811 // N = Distance (as unsigned) 8812 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 8813 APInt MaxBECount = getUnsignedRangeMax(Distance); 8814 8815 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 8816 // we end up with a loop whose backedge-taken count is n - 1. Detect this 8817 // case, and see if we can improve the bound. 
8818 // 8819 // Explicitly handling this here is necessary because getUnsignedRange 8820 // isn't context-sensitive; it doesn't know that we only care about the 8821 // range inside the loop. 8822 const SCEV *Zero = getZero(Distance->getType()); 8823 const SCEV *One = getOne(Distance->getType()); 8824 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 8825 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 8826 // If Distance + 1 doesn't overflow, we can compute the maximum distance 8827 // as "unsigned_max(Distance + 1) - 1". 8828 ConstantRange CR = getUnsignedRange(DistancePlusOne); 8829 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 8830 } 8831 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 8832 } 8833 8834 // If the condition controls loop exit (the loop exits only if the expression 8835 // is true) and the addition is no-wrap we can use unsigned divide to 8836 // compute the backedge count. In this case, the step may not divide the 8837 // distance, but we don't care because if the condition is "missed" the loop 8838 // will have undefined behavior due to wrapping. 8839 if (ControlsExit && AddRec->hasNoSelfWrap() && 8840 loopHasNoAbnormalExits(AddRec->getLoop())) { 8841 const SCEV *Exact = 8842 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 8843 const SCEV *Max = 8844 Exact == getCouldNotCompute() 8845 ? Exact 8846 : getConstant(getUnsignedRangeMax(Exact)); 8847 return ExitLimit(Exact, Max, false, Predicates); 8848 } 8849 8850 // Solve the general equation. 8851 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 8852 getNegativeSCEV(Start), *this); 8853 const SCEV *M = E == getCouldNotCompute() 8854 ? E 8855 : getConstant(getUnsignedRangeMax(E)); 8856 return ExitLimit(E, M, false, Predicates); 8857 } 8858 8859 ScalarEvolution::ExitLimit 8860 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 8861 // Loops that look like: while (X == 0) are very strange indeed. We don't 8862 // handle them yet except for the trivial case. This could be expanded in the 8863 // future as needed. 8864 8865 // If the value is a constant, check to see if it is known to be non-zero 8866 // already. If so, the backedge will execute zero times. 8867 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 8868 if (!C->getValue()->isZero()) 8869 return getZero(C->getType()); 8870 return getCouldNotCompute(); // Otherwise it will loop infinitely. 8871 } 8872 8873 // We could implement others, but I really doubt anyone writes loops like 8874 // this, and if they did, they would already be constant folded. 8875 return getCouldNotCompute(); 8876 } 8877 8878 std::pair<BasicBlock *, BasicBlock *> 8879 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 8880 // If the block has a unique predecessor, then there is no path from the 8881 // predecessor to the block that does not go through the direct edge 8882 // from the predecessor to the block. 8883 if (BasicBlock *Pred = BB->getSinglePredecessor()) 8884 return {Pred, BB}; 8885 8886 // A loop's header is defined to be a block that dominates the loop. 8887 // If the header has a unique predecessor outside the loop, it must be 8888 // a block that has exactly one successor that can reach the loop. 
8889 if (Loop *L = LI.getLoopFor(BB)) 8890 return {L->getLoopPredecessor(), L->getHeader()}; 8891 8892 return {nullptr, nullptr}; 8893 } 8894 8895 /// SCEV structural equivalence is usually sufficient for testing whether two 8896 /// expressions are equal, however for the purposes of looking for a condition 8897 /// guarding a loop, it can be useful to be a little more general, since a 8898 /// front-end may have replicated the controlling expression. 8899 static bool HasSameValue(const SCEV *A, const SCEV *B) { 8900 // Quick check to see if they are the same SCEV. 8901 if (A == B) return true; 8902 8903 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 8904 // Not all instructions that are "identical" compute the same value. For 8905 // instance, two distinct alloca instructions allocating the same type are 8906 // identical and do not read memory; but compute distinct values. 8907 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 8908 }; 8909 8910 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 8911 // two different instructions with the same value. Check for this case. 8912 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 8913 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 8914 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 8915 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 8916 if (ComputesEqualValues(AI, BI)) 8917 return true; 8918 8919 // Otherwise assume they may have a different value. 8920 return false; 8921 } 8922 8923 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 8924 const SCEV *&LHS, const SCEV *&RHS, 8925 unsigned Depth) { 8926 bool Changed = false; 8927 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 8928 // '0 != 0'. 8929 auto TrivialCase = [&](bool TriviallyTrue) { 8930 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 8931 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 8932 return true; 8933 }; 8934 // If we hit the max recursion limit bail out. 8935 if (Depth >= 3) 8936 return false; 8937 8938 // Canonicalize a constant to the right side. 8939 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 8940 // Check for both operands constant. 8941 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 8942 if (ConstantExpr::getICmp(Pred, 8943 LHSC->getValue(), 8944 RHSC->getValue())->isNullValue()) 8945 return TrivialCase(false); 8946 else 8947 return TrivialCase(true); 8948 } 8949 // Otherwise swap the operands to put the constant on the right. 8950 std::swap(LHS, RHS); 8951 Pred = ICmpInst::getSwappedPredicate(Pred); 8952 Changed = true; 8953 } 8954 8955 // If we're comparing an addrec with a value which is loop-invariant in the 8956 // addrec's loop, put the addrec on the left. Also make a dominance check, 8957 // as both operands could be addrecs loop-invariant in each other's loop. 8958 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 8959 const Loop *L = AR->getLoop(); 8960 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 8961 std::swap(LHS, RHS); 8962 Pred = ICmpInst::getSwappedPredicate(Pred); 8963 Changed = true; 8964 } 8965 } 8966 8967 // If there's a constant operand, canonicalize comparisons with boundary 8968 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
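  //
  // For example (a sketch): "X ult 1" becomes "X == 0", "X uge 1" becomes
  // "X != 0", and "X ule 7" becomes "X ult 8"; vacuous comparisons such as
  // "X ule UINT_MAX" fold to a trivial true/false case.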
8969 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 8970 const APInt &RA = RC->getAPInt(); 8971 8972 bool SimplifiedByConstantRange = false; 8973 8974 if (!ICmpInst::isEquality(Pred)) { 8975 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 8976 if (ExactCR.isFullSet()) 8977 return TrivialCase(true); 8978 else if (ExactCR.isEmptySet()) 8979 return TrivialCase(false); 8980 8981 APInt NewRHS; 8982 CmpInst::Predicate NewPred; 8983 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 8984 ICmpInst::isEquality(NewPred)) { 8985 // We were able to convert an inequality to an equality. 8986 Pred = NewPred; 8987 RHS = getConstant(NewRHS); 8988 Changed = SimplifiedByConstantRange = true; 8989 } 8990 } 8991 8992 if (!SimplifiedByConstantRange) { 8993 switch (Pred) { 8994 default: 8995 break; 8996 case ICmpInst::ICMP_EQ: 8997 case ICmpInst::ICMP_NE: 8998 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 8999 if (!RA) 9000 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9001 if (const SCEVMulExpr *ME = 9002 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9003 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9004 ME->getOperand(0)->isAllOnesValue()) { 9005 RHS = AE->getOperand(1); 9006 LHS = ME->getOperand(1); 9007 Changed = true; 9008 } 9009 break; 9010 9011 9012 // The "Should have been caught earlier!" messages refer to the fact 9013 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9014 // should have fired on the corresponding cases, and canonicalized the 9015 // check to trivial case. 9016 9017 case ICmpInst::ICMP_UGE: 9018 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9019 Pred = ICmpInst::ICMP_UGT; 9020 RHS = getConstant(RA - 1); 9021 Changed = true; 9022 break; 9023 case ICmpInst::ICMP_ULE: 9024 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9025 Pred = ICmpInst::ICMP_ULT; 9026 RHS = getConstant(RA + 1); 9027 Changed = true; 9028 break; 9029 case ICmpInst::ICMP_SGE: 9030 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9031 Pred = ICmpInst::ICMP_SGT; 9032 RHS = getConstant(RA - 1); 9033 Changed = true; 9034 break; 9035 case ICmpInst::ICMP_SLE: 9036 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9037 Pred = ICmpInst::ICMP_SLT; 9038 RHS = getConstant(RA + 1); 9039 Changed = true; 9040 break; 9041 } 9042 } 9043 } 9044 9045 // Check for obvious equality. 9046 if (HasSameValue(LHS, RHS)) { 9047 if (ICmpInst::isTrueWhenEqual(Pred)) 9048 return TrivialCase(true); 9049 if (ICmpInst::isFalseWhenEqual(Pred)) 9050 return TrivialCase(false); 9051 } 9052 9053 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9054 // adding or subtracting 1 from one of the operands. 
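// Sketch of the idea (assuming the range checks below succeed): for
// "LHS s<= RHS", if RHS is provably not SINT_MAX then RHS + 1 cannot
// overflow and the comparison becomes "LHS s< RHS + 1"; failing that, if
// LHS is provably not SINT_MIN it becomes "LHS - 1 s< RHS".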
9055 switch (Pred) { 9056 case ICmpInst::ICMP_SLE: 9057 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9058 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9059 SCEV::FlagNSW); 9060 Pred = ICmpInst::ICMP_SLT; 9061 Changed = true; 9062 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9063 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9064 SCEV::FlagNSW); 9065 Pred = ICmpInst::ICMP_SLT; 9066 Changed = true; 9067 } 9068 break; 9069 case ICmpInst::ICMP_SGE: 9070 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9071 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9072 SCEV::FlagNSW); 9073 Pred = ICmpInst::ICMP_SGT; 9074 Changed = true; 9075 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9076 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9077 SCEV::FlagNSW); 9078 Pred = ICmpInst::ICMP_SGT; 9079 Changed = true; 9080 } 9081 break; 9082 case ICmpInst::ICMP_ULE: 9083 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9084 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9085 SCEV::FlagNUW); 9086 Pred = ICmpInst::ICMP_ULT; 9087 Changed = true; 9088 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9089 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9090 Pred = ICmpInst::ICMP_ULT; 9091 Changed = true; 9092 } 9093 break; 9094 case ICmpInst::ICMP_UGE: 9095 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9096 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9097 Pred = ICmpInst::ICMP_UGT; 9098 Changed = true; 9099 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9100 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9101 SCEV::FlagNUW); 9102 Pred = ICmpInst::ICMP_UGT; 9103 Changed = true; 9104 } 9105 break; 9106 default: 9107 break; 9108 } 9109 9110 // TODO: More simplifications are possible here. 9111 9112 // Recursively simplify until we either hit a recursion limit or nothing 9113 // changes. 9114 if (Changed) 9115 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9116 9117 return Changed; 9118 } 9119 9120 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9121 return getSignedRangeMax(S).isNegative(); 9122 } 9123 9124 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9125 return getSignedRangeMin(S).isStrictlyPositive(); 9126 } 9127 9128 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9129 return !getSignedRangeMin(S).isNegative(); 9130 } 9131 9132 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9133 return !getSignedRangeMax(S).isStrictlyPositive(); 9134 } 9135 9136 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9137 return isKnownNegative(S) || isKnownPositive(S); 9138 } 9139 9140 std::pair<const SCEV *, const SCEV *> 9141 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9142 // Compute SCEV on entry of loop L. 9143 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9144 if (Start == getCouldNotCompute()) 9145 return { Start, Start }; 9146 // Compute post increment SCEV for loop L. 9147 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9148 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9149 return { Start, PostInc }; 9150 } 9151 9152 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9153 const SCEV *LHS, const SCEV *RHS) { 9154 // First collect all loops. 
9155 SmallPtrSet<const Loop *, 8> LoopsUsed; 9156 getUsedLoops(LHS, LoopsUsed); 9157 getUsedLoops(RHS, LoopsUsed); 9158 9159 if (LoopsUsed.empty()) 9160 return false; 9161 9162 // Domination relationship must be a linear order on collected loops. 9163 #ifndef NDEBUG 9164 for (auto *L1 : LoopsUsed) 9165 for (auto *L2 : LoopsUsed) 9166 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9167 DT.dominates(L2->getHeader(), L1->getHeader())) && 9168 "Domination relationship is not a linear order"); 9169 #endif 9170 9171 const Loop *MDL = 9172 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9173 [&](const Loop *L1, const Loop *L2) { 9174 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9175 }); 9176 9177 // Get init and post increment value for LHS. 9178 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 9179 // if LHS contains unknown non-invariant SCEV then bail out. 9180 if (SplitLHS.first == getCouldNotCompute()) 9181 return false; 9182 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 9183 // Get init and post increment value for RHS. 9184 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 9185 // if RHS contains unknown non-invariant SCEV then bail out. 9186 if (SplitRHS.first == getCouldNotCompute()) 9187 return false; 9188 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 9189 // It is possible that init SCEV contains an invariant load but it does 9190 // not dominate MDL and is not available at MDL loop entry, so we should 9191 // check it here. 9192 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 9193 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 9194 return false; 9195 9196 return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) && 9197 isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 9198 SplitRHS.second); 9199 } 9200 9201 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 9202 const SCEV *LHS, const SCEV *RHS) { 9203 // Canonicalize the inputs first. 9204 (void)SimplifyICmpOperands(Pred, LHS, RHS); 9205 9206 if (isKnownViaInduction(Pred, LHS, RHS)) 9207 return true; 9208 9209 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 9210 return true; 9211 9212 // Otherwise see what can be done with some simple reasoning. 9213 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 9214 } 9215 9216 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 9217 const SCEVAddRecExpr *LHS, 9218 const SCEV *RHS) { 9219 const Loop *L = LHS->getLoop(); 9220 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 9221 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 9222 } 9223 9224 bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS, 9225 ICmpInst::Predicate Pred, 9226 bool &Increasing) { 9227 bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing); 9228 9229 #ifndef NDEBUG 9230 // Verify an invariant: inverting the predicate should turn a monotonically 9231 // increasing change to a monotonically decreasing one, and vice versa. 
9232 bool IncreasingSwapped; 9233 bool ResultSwapped = isMonotonicPredicateImpl( 9234 LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped); 9235 9236 assert(Result == ResultSwapped && "should be able to analyze both!"); 9237 if (ResultSwapped) 9238 assert(Increasing == !IncreasingSwapped && 9239 "monotonicity should flip as we flip the predicate"); 9240 #endif 9241 9242 return Result; 9243 } 9244 9245 bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS, 9246 ICmpInst::Predicate Pred, 9247 bool &Increasing) { 9248 9249 // A zero step value for LHS means the induction variable is essentially a 9250 // loop invariant value. We don't really depend on the predicate actually 9251 // flipping from false to true (for increasing predicates, and the other way 9252 // around for decreasing predicates), all we care about is that *if* the 9253 // predicate changes then it only changes from false to true. 9254 // 9255 // A zero step value in itself is not very useful, but there may be places 9256 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 9257 // as general as possible. 9258 9259 switch (Pred) { 9260 default: 9261 return false; // Conservative answer 9262 9263 case ICmpInst::ICMP_UGT: 9264 case ICmpInst::ICMP_UGE: 9265 case ICmpInst::ICMP_ULT: 9266 case ICmpInst::ICMP_ULE: 9267 if (!LHS->hasNoUnsignedWrap()) 9268 return false; 9269 9270 Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE; 9271 return true; 9272 9273 case ICmpInst::ICMP_SGT: 9274 case ICmpInst::ICMP_SGE: 9275 case ICmpInst::ICMP_SLT: 9276 case ICmpInst::ICMP_SLE: { 9277 if (!LHS->hasNoSignedWrap()) 9278 return false; 9279 9280 const SCEV *Step = LHS->getStepRecurrence(*this); 9281 9282 if (isKnownNonNegative(Step)) { 9283 Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE; 9284 return true; 9285 } 9286 9287 if (isKnownNonPositive(Step)) { 9288 Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE; 9289 return true; 9290 } 9291 9292 return false; 9293 } 9294 9295 } 9296 9297 llvm_unreachable("switch has default clause!"); 9298 } 9299 9300 bool ScalarEvolution::isLoopInvariantPredicate( 9301 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 9302 ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS, 9303 const SCEV *&InvariantRHS) { 9304 9305 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 9306 if (!isLoopInvariant(RHS, L)) { 9307 if (!isLoopInvariant(LHS, L)) 9308 return false; 9309 9310 std::swap(LHS, RHS); 9311 Pred = ICmpInst::getSwappedPredicate(Pred); 9312 } 9313 9314 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9315 if (!ArLHS || ArLHS->getLoop() != L) 9316 return false; 9317 9318 bool Increasing; 9319 if (!isMonotonicPredicate(ArLHS, Pred, Increasing)) 9320 return false; 9321 9322 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 9323 // true as the loop iterates, and the backedge is control dependent on 9324 // "ArLHS `Pred` RHS" == true then we can reason as follows: 9325 // 9326 // * if the predicate was false in the first iteration then the predicate 9327 // is never evaluated again, since the loop exits without taking the 9328 // backedge. 9329 // * if the predicate was true in the first iteration then it will 9330 // continue to be true for all future iterations since it is 9331 // monotonically increasing. 
9332 // 9333 // For both the above possibilities, we can replace the loop varying 9334 // predicate with its value on the first iteration of the loop (which is 9335 // loop invariant). 9336 // 9337 // A similar reasoning applies for a monotonically decreasing predicate, by 9338 // replacing true with false and false with true in the above two bullets. 9339 9340 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 9341 9342 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 9343 return false; 9344 9345 InvariantPred = Pred; 9346 InvariantLHS = ArLHS->getStart(); 9347 InvariantRHS = RHS; 9348 return true; 9349 } 9350 9351 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 9352 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 9353 if (HasSameValue(LHS, RHS)) 9354 return ICmpInst::isTrueWhenEqual(Pred); 9355 9356 // This code is split out from isKnownPredicate because it is called from 9357 // within isLoopEntryGuardedByCond. 9358 9359 auto CheckRanges = 9360 [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) { 9361 return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS) 9362 .contains(RangeLHS); 9363 }; 9364 9365 // The check at the top of the function catches the case where the values are 9366 // known to be equal. 9367 if (Pred == CmpInst::ICMP_EQ) 9368 return false; 9369 9370 if (Pred == CmpInst::ICMP_NE) 9371 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 9372 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) || 9373 isKnownNonZero(getMinusSCEV(LHS, RHS)); 9374 9375 if (CmpInst::isSigned(Pred)) 9376 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 9377 9378 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 9379 } 9380 9381 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 9382 const SCEV *LHS, 9383 const SCEV *RHS) { 9384 // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer. 9385 // Return Y via OutY. 
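// E.g. (illustrative): with Result = (%x + 42)<nsw>, X = %x and
// ExpectedFlags = FlagNSW, the lambda below sets OutY to 42 and returns
// true; it returns false if the nsw flag is missing or the variable
// operand is not X.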
9386 auto MatchBinaryAddToConst =
9387 [this](const SCEV *Result, const SCEV *X, APInt &OutY,
9388 SCEV::NoWrapFlags ExpectedFlags) {
9389 const SCEV *NonConstOp, *ConstOp;
9390 SCEV::NoWrapFlags FlagsPresent;
9391
9392 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
9393 !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
9394 return false;
9395
9396 OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
9397 return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
9398 };
9399
9400 APInt C;
9401
9402 switch (Pred) {
9403 default:
9404 break;
9405
9406 case ICmpInst::ICMP_SGE:
9407 std::swap(LHS, RHS);
9408 LLVM_FALLTHROUGH;
9409 case ICmpInst::ICMP_SLE:
9410 // X s<= (X + C)<nsw> if C >= 0
9411 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
9412 return true;
9413
9414 // (X + C)<nsw> s<= X if C <= 0
9415 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
9416 !C.isStrictlyPositive())
9417 return true;
9418 break;
9419
9420 case ICmpInst::ICMP_SGT:
9421 std::swap(LHS, RHS);
9422 LLVM_FALLTHROUGH;
9423 case ICmpInst::ICMP_SLT:
9424 // X s< (X + C)<nsw> if C > 0
9425 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
9426 C.isStrictlyPositive())
9427 return true;
9428
9429 // (X + C)<nsw> s< X if C < 0
9430 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
9431 return true;
9432 break;
9433 }
9434
9435 return false;
9436 }
9437
9438 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
9439 const SCEV *LHS,
9440 const SCEV *RHS) {
9441 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
9442 return false;
9443
9444 // Allowing an arbitrary number of activations of isKnownPredicateViaSplitting
9445 // on the stack can result in exponential time complexity.
9446 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
9447
9448 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
9449 //
9450 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
9451 // isKnownPredicate. isKnownPredicate is more powerful, but also more
9452 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
9453 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
9454 // use isKnownPredicate later if needed.
9455 return isKnownNonNegative(RHS) &&
9456 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
9457 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
9458 }
9459
9460 bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
9461 ICmpInst::Predicate Pred,
9462 const SCEV *LHS, const SCEV *RHS) {
9463 // No need to even try if we know the module has no guards.
9464 if (!HasGuards)
9465 return false;
9466
9467 return any_of(*BB, [&](Instruction &I) {
9468 using namespace llvm::PatternMatch;
9469
9470 Value *Condition;
9471 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9472 m_Value(Condition))) &&
9473 isImpliedCond(Pred, LHS, RHS, Condition, false);
9474 });
9475 }
9476
9477 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9478 /// protected by a conditional between LHS and RHS. This is used
9479 /// to eliminate casts.
9480 bool
9481 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9482 ICmpInst::Predicate Pred,
9483 const SCEV *LHS, const SCEV *RHS) {
9484 // Interpret a null as meaning no loop, where there is obviously no guard
9485 // (interprocedural conditions notwithstanding).
9486 if (!L) return true;
9487
9488 if (VerifyIR)
9489 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
9490 "This cannot be done on broken IR!");
9491
9492
9493 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
9494 return true;
9495
9496 BasicBlock *Latch = L->getLoopLatch();
9497 if (!Latch)
9498 return false;
9499
9500 BranchInst *LoopContinuePredicate =
9501 dyn_cast<BranchInst>(Latch->getTerminator());
9502 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
9503 isImpliedCond(Pred, LHS, RHS,
9504 LoopContinuePredicate->getCondition(),
9505 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
9506 return true;
9507
9508 // We don't want more than one activation of the following loops on the stack
9509 // -- that can lead to O(n!) time complexity.
9510 if (WalkingBEDominatingConds)
9511 return false;
9512
9513 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
9514
9515 // See if we can exploit a trip count to prove the predicate.
9516 const auto &BETakenInfo = getBackedgeTakenInfo(L);
9517 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
9518 if (LatchBECount != getCouldNotCompute()) {
9519 // We know that Latch branches back to the loop header exactly
9520 // LatchBECount times. This means the backedge condition at Latch is
9521 // equivalent to "{0,+,1} u< LatchBECount".
9522 Type *Ty = LatchBECount->getType();
9523 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
9524 const SCEV *LoopCounter =
9525 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
9526 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
9527 LatchBECount))
9528 return true;
9529 }
9530
9531 // Check conditions due to any @llvm.assume intrinsics.
9532 for (auto &AssumeVH : AC.assumptions()) {
9533 if (!AssumeVH)
9534 continue;
9535 auto *CI = cast<CallInst>(AssumeVH);
9536 if (!DT.dominates(CI, Latch->getTerminator()))
9537 continue;
9538
9539 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
9540 return true;
9541 }
9542
9543 // If the loop is not reachable from the entry block, we risk running into an
9544 // infinite loop as we walk up into the dom tree. These loops do not matter
9545 // anyway, so we just return a conservative answer when we see them.
9546 if (!DT.isReachableFromEntry(L->getHeader()))
9547 return false;
9548
9549 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
9550 return true;
9551
9552 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
9553 DTN != HeaderDTN; DTN = DTN->getIDom()) {
9554 assert(DTN && "should reach the loop header before reaching the root!");
9555
9556 BasicBlock *BB = DTN->getBlock();
9557 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
9558 return true;
9559
9560 BasicBlock *PBB = BB->getSinglePredecessor();
9561 if (!PBB)
9562 continue;
9563
9564 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
9565 if (!ContinuePredicate || !ContinuePredicate->isConditional())
9566 continue;
9567
9568 Value *Condition = ContinuePredicate->getCondition();
9569
9570 // If we have an edge `E` within the loop body that dominates the only
9571 // latch, the condition guarding `E` also guards the backedge. This
9572 // reasoning works only for loops with a single latch.
9573
9574 BasicBlockEdge DominatingEdge(PBB, BB);
9575 if (DominatingEdge.isSingleEdge()) {
9576 // We're constructively (and conservatively) enumerating edges within the
9577 // loop body that dominate the latch.
The dominator tree better agree 9578 // with us on this: 9579 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 9580 9581 if (isImpliedCond(Pred, LHS, RHS, Condition, 9582 BB != ContinuePredicate->getSuccessor(0))) 9583 return true; 9584 } 9585 } 9586 9587 return false; 9588 } 9589 9590 bool 9591 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 9592 ICmpInst::Predicate Pred, 9593 const SCEV *LHS, const SCEV *RHS) { 9594 // Interpret a null as meaning no loop, where there is obviously no guard 9595 // (interprocedural conditions notwithstanding). 9596 if (!L) return false; 9597 9598 if (VerifyIR) 9599 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 9600 "This cannot be done on broken IR!"); 9601 9602 // Both LHS and RHS must be available at loop entry. 9603 assert(isAvailableAtLoopEntry(LHS, L) && 9604 "LHS is not available at Loop Entry"); 9605 assert(isAvailableAtLoopEntry(RHS, L) && 9606 "RHS is not available at Loop Entry"); 9607 9608 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 9609 return true; 9610 9611 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 9612 // the facts (a >= b && a != b) separately. A typical situation is when the 9613 // non-strict comparison is known from ranges and non-equality is known from 9614 // dominating predicates. If we are proving strict comparison, we always try 9615 // to prove non-equality and non-strict comparison separately. 9616 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 9617 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 9618 bool ProvedNonStrictComparison = false; 9619 bool ProvedNonEquality = false; 9620 9621 if (ProvingStrictComparison) { 9622 ProvedNonStrictComparison = 9623 isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS); 9624 ProvedNonEquality = 9625 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS); 9626 if (ProvedNonStrictComparison && ProvedNonEquality) 9627 return true; 9628 } 9629 9630 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 9631 auto ProveViaGuard = [&](BasicBlock *Block) { 9632 if (isImpliedViaGuard(Block, Pred, LHS, RHS)) 9633 return true; 9634 if (ProvingStrictComparison) { 9635 if (!ProvedNonStrictComparison) 9636 ProvedNonStrictComparison = 9637 isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS); 9638 if (!ProvedNonEquality) 9639 ProvedNonEquality = 9640 isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS); 9641 if (ProvedNonStrictComparison && ProvedNonEquality) 9642 return true; 9643 } 9644 return false; 9645 }; 9646 9647 // Try to prove (Pred, LHS, RHS) using isImpliedCond. 9648 auto ProveViaCond = [&](Value *Condition, bool Inverse) { 9649 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse)) 9650 return true; 9651 if (ProvingStrictComparison) { 9652 if (!ProvedNonStrictComparison) 9653 ProvedNonStrictComparison = 9654 isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse); 9655 if (!ProvedNonEquality) 9656 ProvedNonEquality = 9657 isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse); 9658 if (ProvedNonStrictComparison && ProvedNonEquality) 9659 return true; 9660 } 9661 return false; 9662 }; 9663 9664 // Starting at the loop predecessor, climb up the predecessor chain, as long 9665 // as there are predecessors that can be found that have unique successors 9666 // leading to the original header. 
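// Sketch of the walk (illustrative): with blocks guard2 -> guard1 ->
// preheader -> header, the pairs visited are {preheader, header},
// {guard1, preheader}, {guard2, guard1}; each pair's first block is checked
// for a guard intrinsic or a conditional branch implying the predicate.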
9667 for (std::pair<BasicBlock *, BasicBlock *>
9668 Pair(L->getLoopPredecessor(), L->getHeader());
9669 Pair.first;
9670 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
9671
9672 if (ProveViaGuard(Pair.first))
9673 return true;
9674
9675 BranchInst *LoopEntryPredicate =
9676 dyn_cast<BranchInst>(Pair.first->getTerminator());
9677 if (!LoopEntryPredicate ||
9678 LoopEntryPredicate->isUnconditional())
9679 continue;
9680
9681 if (ProveViaCond(LoopEntryPredicate->getCondition(),
9682 LoopEntryPredicate->getSuccessor(0) != Pair.second))
9683 return true;
9684 }
9685
9686 // Check conditions due to any @llvm.assume intrinsics.
9687 for (auto &AssumeVH : AC.assumptions()) {
9688 if (!AssumeVH)
9689 continue;
9690 auto *CI = cast<CallInst>(AssumeVH);
9691 if (!DT.dominates(CI, L->getHeader()))
9692 continue;
9693
9694 if (ProveViaCond(CI->getArgOperand(0), false))
9695 return true;
9696 }
9697
9698 return false;
9699 }
9700
9701 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
9702 const SCEV *LHS, const SCEV *RHS,
9703 Value *FoundCondValue,
9704 bool Inverse) {
9705 if (!PendingLoopPredicates.insert(FoundCondValue).second)
9706 return false;
9707
9708 auto ClearOnExit =
9709 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
9710
9711 // Recursively handle And and Or conditions.
9712 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
9713 if (BO->getOpcode() == Instruction::And) {
9714 if (!Inverse)
9715 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9716 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9717 } else if (BO->getOpcode() == Instruction::Or) {
9718 if (Inverse)
9719 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
9720 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
9721 }
9722 }
9723
9724 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
9725 if (!ICI) return false;
9726
9727 // Now that we've found a conditional branch that dominates the loop or
9728 // controls the loop latch, check whether it is the comparison we are looking for.
9729 ICmpInst::Predicate FoundPred;
9730 if (Inverse)
9731 FoundPred = ICI->getInversePredicate();
9732 else
9733 FoundPred = ICI->getPredicate();
9734
9735 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
9736 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
9737
9738 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
9739 }
9740
9741 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
9742 const SCEV *RHS,
9743 ICmpInst::Predicate FoundPred,
9744 const SCEV *FoundLHS,
9745 const SCEV *FoundRHS) {
9746 // Balance the types.
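// E.g. (illustrative): if the query is over i32 but the found condition
// compares i64 values, LHS/RHS are sign- or zero-extended (depending on the
// signedness of Pred) so that both comparisons are over i64; the symmetric
// case widens FoundLHS/FoundRHS instead.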
9747 if (getTypeSizeInBits(LHS->getType()) < 9748 getTypeSizeInBits(FoundLHS->getType())) { 9749 if (CmpInst::isSigned(Pred)) { 9750 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 9751 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 9752 } else { 9753 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 9754 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 9755 } 9756 } else if (getTypeSizeInBits(LHS->getType()) > 9757 getTypeSizeInBits(FoundLHS->getType())) { 9758 if (CmpInst::isSigned(FoundPred)) { 9759 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 9760 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 9761 } else { 9762 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 9763 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 9764 } 9765 } 9766 9767 // Canonicalize the query to match the way instcombine will have 9768 // canonicalized the comparison. 9769 if (SimplifyICmpOperands(Pred, LHS, RHS)) 9770 if (LHS == RHS) 9771 return CmpInst::isTrueWhenEqual(Pred); 9772 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 9773 if (FoundLHS == FoundRHS) 9774 return CmpInst::isFalseWhenEqual(FoundPred); 9775 9776 // Check to see if we can make the LHS or RHS match. 9777 if (LHS == FoundRHS || RHS == FoundLHS) { 9778 if (isa<SCEVConstant>(RHS)) { 9779 std::swap(FoundLHS, FoundRHS); 9780 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 9781 } else { 9782 std::swap(LHS, RHS); 9783 Pred = ICmpInst::getSwappedPredicate(Pred); 9784 } 9785 } 9786 9787 // Check whether the found predicate is the same as the desired predicate. 9788 if (FoundPred == Pred) 9789 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9790 9791 // Check whether swapping the found predicate makes it the same as the 9792 // desired predicate. 9793 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 9794 if (isa<SCEVConstant>(RHS)) 9795 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 9796 else 9797 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 9798 RHS, LHS, FoundLHS, FoundRHS); 9799 } 9800 9801 // Unsigned comparison is the same as signed comparison when both the operands 9802 // are non-negative. 9803 if (CmpInst::isUnsigned(FoundPred) && 9804 CmpInst::getSignedPredicate(FoundPred) == Pred && 9805 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) 9806 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 9807 9808 // Check if we can make progress by sharpening ranges. 9809 if (FoundPred == ICmpInst::ICMP_NE && 9810 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 9811 9812 const SCEVConstant *C = nullptr; 9813 const SCEV *V = nullptr; 9814 9815 if (isa<SCEVConstant>(FoundLHS)) { 9816 C = cast<SCEVConstant>(FoundLHS); 9817 V = FoundRHS; 9818 } else { 9819 C = cast<SCEVConstant>(FoundRHS); 9820 V = FoundLHS; 9821 } 9822 9823 // The guarding predicate tells us that C != V. If the known range 9824 // of V is [C, t), we can sharpen the range to [C + 1, t). The 9825 // range we consider has to correspond to same signedness as the 9826 // predicate we're interested in folding. 9827 9828 APInt Min = ICmpInst::isSigned(Pred) ? 9829 getSignedRangeMin(V) : getUnsignedRangeMin(V); 9830 9831 if (Min == C->getAPInt()) { 9832 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 9833 // This is true even if (Min + 1) wraps around -- in case of 9834 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 
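// Concrete instance (illustrative, unsigned): if the known unsigned
// minimum of V is 5 and the guarding condition says V != 5, every feasible
// value of V is u>= 6, so V u>= 6 can be used below.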
9835 9836 APInt SharperMin = Min + 1; 9837 9838 switch (Pred) { 9839 case ICmpInst::ICMP_SGE: 9840 case ICmpInst::ICMP_UGE: 9841 // We know V `Pred` SharperMin. If this implies LHS `Pred` 9842 // RHS, we're done. 9843 if (isImpliedCondOperands(Pred, LHS, RHS, V, 9844 getConstant(SharperMin))) 9845 return true; 9846 LLVM_FALLTHROUGH; 9847 9848 case ICmpInst::ICMP_SGT: 9849 case ICmpInst::ICMP_UGT: 9850 // We know from the range information that (V `Pred` Min || 9851 // V == Min). We know from the guarding condition that !(V 9852 // == Min). This gives us 9853 // 9854 // V `Pred` Min || V == Min && !(V == Min) 9855 // => V `Pred` Min 9856 // 9857 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 9858 9859 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min))) 9860 return true; 9861 LLVM_FALLTHROUGH; 9862 9863 default: 9864 // No change 9865 break; 9866 } 9867 } 9868 } 9869 9870 // Check whether the actual condition is beyond sufficient. 9871 if (FoundPred == ICmpInst::ICMP_EQ) 9872 if (ICmpInst::isTrueWhenEqual(Pred)) 9873 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 9874 return true; 9875 if (Pred == ICmpInst::ICMP_NE) 9876 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 9877 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 9878 return true; 9879 9880 // Otherwise assume the worst. 9881 return false; 9882 } 9883 9884 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 9885 const SCEV *&L, const SCEV *&R, 9886 SCEV::NoWrapFlags &Flags) { 9887 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 9888 if (!AE || AE->getNumOperands() != 2) 9889 return false; 9890 9891 L = AE->getOperand(0); 9892 R = AE->getOperand(1); 9893 Flags = AE->getNoWrapFlags(); 9894 return true; 9895 } 9896 9897 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 9898 const SCEV *Less) { 9899 // We avoid subtracting expressions here because this function is usually 9900 // fairly deep in the call stack (i.e. is called many times). 9901 9902 // X - X = 0. 9903 if (More == Less) 9904 return APInt(getTypeSizeInBits(More->getType()), 0); 9905 9906 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 9907 const auto *LAR = cast<SCEVAddRecExpr>(Less); 9908 const auto *MAR = cast<SCEVAddRecExpr>(More); 9909 9910 if (LAR->getLoop() != MAR->getLoop()) 9911 return None; 9912 9913 // We look at affine expressions only; not for correctness but to keep 9914 // getStepRecurrence cheap. 9915 if (!LAR->isAffine() || !MAR->isAffine()) 9916 return None; 9917 9918 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 9919 return None; 9920 9921 Less = LAR->getStart(); 9922 More = MAR->getStart(); 9923 9924 // fall through 9925 } 9926 9927 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 9928 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 9929 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 9930 return M - L; 9931 } 9932 9933 SCEV::NoWrapFlags Flags; 9934 const SCEV *LLess = nullptr, *RLess = nullptr; 9935 const SCEV *LMore = nullptr, *RMore = nullptr; 9936 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 9937 // Compare (X + C1) vs X. 9938 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 9939 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 9940 if (RLess == More) 9941 return -(C1->getAPInt()); 9942 9943 // Compare X vs (X + C2). 9944 if (splitBinaryAdd(More, LMore, RMore, Flags)) 9945 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 9946 if (RMore == Less) 9947 return C2->getAPInt(); 9948 9949 // Compare (X + C1) vs (X + C2). 
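// E.g. (illustrative): More = (%x + 7) and Less = (%x + 3) yields the
// constant difference 4 without materializing a subtraction SCEV, which is
// the whole point of this helper.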
9950 if (C1 && C2 && RLess == RMore) 9951 return C2->getAPInt() - C1->getAPInt(); 9952 9953 return None; 9954 } 9955 9956 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 9957 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 9958 const SCEV *FoundLHS, const SCEV *FoundRHS) { 9959 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 9960 return false; 9961 9962 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 9963 if (!AddRecLHS) 9964 return false; 9965 9966 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 9967 if (!AddRecFoundLHS) 9968 return false; 9969 9970 // We'd like to let SCEV reason about control dependencies, so we constrain 9971 // both the inequalities to be about add recurrences on the same loop. This 9972 // way we can use isLoopEntryGuardedByCond later. 9973 9974 const Loop *L = AddRecFoundLHS->getLoop(); 9975 if (L != AddRecLHS->getLoop()) 9976 return false; 9977 9978 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 9979 // 9980 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 9981 // ... (2) 9982 // 9983 // Informal proof for (2), assuming (1) [*]: 9984 // 9985 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 9986 // 9987 // Then 9988 // 9989 // FoundLHS s< FoundRHS s< INT_MIN - C 9990 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 9991 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 9992 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 9993 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 9994 // <=> FoundLHS + C s< FoundRHS + C 9995 // 9996 // [*]: (1) can be proved by ruling out overflow. 9997 // 9998 // [**]: This can be proved by analyzing all the four possibilities: 9999 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 10000 // (A s>= 0, B s>= 0). 10001 // 10002 // Note: 10003 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 10004 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 10005 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 10006 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 10007 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 10008 // C)". 10009 10010 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS); 10011 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS); 10012 if (!LDiff || !RDiff || *LDiff != *RDiff) 10013 return false; 10014 10015 if (LDiff->isMinValue()) 10016 return true; 10017 10018 APInt FoundRHSLimit; 10019 10020 if (Pred == CmpInst::ICMP_ULT) { 10021 FoundRHSLimit = -(*RDiff); 10022 } else { 10023 assert(Pred == CmpInst::ICMP_SLT && "Checked above!"); 10024 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff; 10025 } 10026 10027 // Try to prove (1) or (2), as needed. 
10028 return isAvailableAtLoopEntry(FoundRHS, L) &&
10029 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10030 getConstant(FoundRHSLimit));
10031 }
10032
10033 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10034 const SCEV *LHS, const SCEV *RHS,
10035 const SCEV *FoundLHS,
10036 const SCEV *FoundRHS, unsigned Depth) {
10037 const PHINode *LPhi = nullptr, *RPhi = nullptr;
10038
10039 auto ClearOnExit = make_scope_exit([&]() {
10040 if (LPhi) {
10041 bool Erased = PendingMerges.erase(LPhi);
10042 assert(Erased && "Failed to erase LPhi!");
10043 (void)Erased;
10044 }
10045 if (RPhi) {
10046 bool Erased = PendingMerges.erase(RPhi);
10047 assert(Erased && "Failed to erase RPhi!");
10048 (void)Erased;
10049 }
10050 });
10051
10052 // Find the respective Phis and check that they are not already pending.
10053 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
10054 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
10055 if (!PendingMerges.insert(Phi).second)
10056 return false;
10057 LPhi = Phi;
10058 }
10059 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
10060 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
10061 // If we detect a loop of Phi nodes being processed by this method, for
10062 // example:
10063 //
10064 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
10065 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
10066 //
10067 // we don't want to deal with a case that complex, so return the
10068 // conservative answer false.
10069 if (!PendingMerges.insert(Phi).second)
10070 return false;
10071 RPhi = Phi;
10072 }
10073
10074 // If none of LHS, RHS is a Phi, nothing to do here.
10075 if (!LPhi && !RPhi)
10076 return false;
10077
10078 // If there is a SCEVUnknown Phi we are interested in, make it left.
10079 if (!LPhi) {
10080 std::swap(LHS, RHS);
10081 std::swap(FoundLHS, FoundRHS);
10082 std::swap(LPhi, RPhi);
10083 Pred = ICmpInst::getSwappedPredicate(Pred);
10084 }
10085
10086 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
10087 const BasicBlock *LBB = LPhi->getParent();
10088 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10089
10090 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
10091 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
10092 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
10093 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
10094 };
10095
10096 if (RPhi && RPhi->getParent() == LBB) {
10097 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
10098 // If we compare two Phis from the same block, and the predicate is true
10099 // for the incoming values from each predecessor block, then it is also
10100 // true for the Phis.
10101 for (const BasicBlock *IncBB : predecessors(LBB)) {
10102 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10103 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
10104 if (!ProvedEasily(L, R))
10105 return false;
10106 }
10107 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
10108 // Case two: RHS is an AddRec for the loop whose header is LBB, i.e. the
10109 // loop has both an AddRec and an Unknown PHI in its header. For such a
10110 // loop we can compare the AddRec's incoming values from above the loop
10111 // and from the latch with the respective incoming values of LPhi.
10112 // TODO: Generalize to handle loops with many inputs in a header.
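// Illustrative IR for this case:
//
//   header:
//     %lphi = phi i32 [ %init, %preheader ], [ %next, %latch ]
//     %iv   = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]  ; the AddRec
//
// Proving "%init `Pred` start" and "%next `Pred` post-increment" suffices
// to conclude the predicate for the Phis themselves.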
10113 if (LPhi->getNumIncomingValues() != 2) return false;
10114
10115 auto *RLoop = RAR->getLoop();
10116 auto *Predecessor = RLoop->getLoopPredecessor();
10117 assert(Predecessor && "Loop with AddRec with no predecessor?");
10118 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
10119 if (!ProvedEasily(L1, RAR->getStart()))
10120 return false;
10121 auto *Latch = RLoop->getLoopLatch();
10122 assert(Latch && "Loop with AddRec with no latch?");
10123 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
10124 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
10125 return false;
10126 } else {
10127 // In all other cases, go over the inputs of LHS and compare each of them
10128 // to RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
10129 // At this point RHS is either a non-Phi, or it is a Phi from some block
10130 // different from LBB.
10131 for (const BasicBlock *IncBB : predecessors(LBB)) {
10132 // Check that RHS is available in this block.
10133 if (!dominates(RHS, IncBB))
10134 return false;
10135 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
10136 if (!ProvedEasily(L, RHS))
10137 return false;
10138 }
10139 }
10140 return true;
10141 }
10142
10143 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
10144 const SCEV *LHS, const SCEV *RHS,
10145 const SCEV *FoundLHS,
10146 const SCEV *FoundRHS) {
10147 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
10148 return true;
10149
10150 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
10151 return true;
10152
10153 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
10154 FoundLHS, FoundRHS) ||
10155 // ~x < ~y --> x > y
10156 isImpliedCondOperandsHelper(Pred, LHS, RHS,
10157 getNotSCEV(FoundRHS),
10158 getNotSCEV(FoundLHS));
10159 }
10160
10161 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
10162 template <typename MinMaxExprType>
10163 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
10164 const SCEV *Candidate) {
10165 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
10166 if (!MinMaxExpr)
10167 return false;
10168
10169 return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end();
10170 }
10171
10172 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
10173 ICmpInst::Predicate Pred,
10174 const SCEV *LHS, const SCEV *RHS) {
10175 // If both sides are affine addrecs for the same loop, with equal
10176 // steps, and we know the recurrences don't wrap, then we only
10177 // need to check the predicate on the starting values.
10178
10179 if (!ICmpInst::isRelational(Pred))
10180 return false;
10181
10182 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
10183 if (!LAR)
10184 return false;
10185 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
10186 if (!RAR)
10187 return false;
10188 if (LAR->getLoop() != RAR->getLoop())
10189 return false;
10190 if (!LAR->isAffine() || !RAR->isAffine())
10191 return false;
10192
10193 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
10194 return false;
10195
10196 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
10197 SCEV::FlagNSW : SCEV::FlagNUW;
10198 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
10199 return false;
10200
10201 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
10202 }
10203
10204 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
10205 /// expression?
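/// E.g. (illustrative): "smin(%a, %b) s<= %a" and "%a u<= umax(%a, %c)"
/// both hold by construction of the min/max expressions.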
10206 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
10207 ICmpInst::Predicate Pred,
10208 const SCEV *LHS, const SCEV *RHS) {
10209 switch (Pred) {
10210 default:
10211 return false;
10212
10213 case ICmpInst::ICMP_SGE:
10214 std::swap(LHS, RHS);
10215 LLVM_FALLTHROUGH;
10216 case ICmpInst::ICMP_SLE:
10217 return
10218 // min(A, ...) <= A
10219 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
10220 // A <= max(A, ...)
10221 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
10222
10223 case ICmpInst::ICMP_UGE:
10224 std::swap(LHS, RHS);
10225 LLVM_FALLTHROUGH;
10226 case ICmpInst::ICMP_ULE:
10227 return
10228 // min(A, ...) <= A
10229 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
10230 // A <= max(A, ...)
10231 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
10232 }
10233
10234 llvm_unreachable("covered switch fell through?!");
10235 }
10236
10237 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
10238 const SCEV *LHS, const SCEV *RHS,
10239 const SCEV *FoundLHS,
10240 const SCEV *FoundRHS,
10241 unsigned Depth) {
10242 assert(getTypeSizeInBits(LHS->getType()) ==
10243 getTypeSizeInBits(RHS->getType()) &&
10244 "LHS and RHS have different sizes?");
10245 assert(getTypeSizeInBits(FoundLHS->getType()) ==
10246 getTypeSizeInBits(FoundRHS->getType()) &&
10247 "FoundLHS and FoundRHS have different sizes?");
10248 // We want to avoid hurting the compile time with analysis of too big trees.
10249 if (Depth > MaxSCEVOperationsImplicationDepth)
10250 return false;
10251 // We only want to work with ICMP_SGT comparison so far.
10252 // TODO: Extend to ICMP_UGT?
10253 if (Pred == ICmpInst::ICMP_SLT) {
10254 Pred = ICmpInst::ICMP_SGT;
10255 std::swap(LHS, RHS);
10256 std::swap(FoundLHS, FoundRHS);
10257 }
10258 if (Pred != ICmpInst::ICMP_SGT)
10259 return false;
10260
10261 auto GetOpFromSExt = [&](const SCEV *S) {
10262 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
10263 return Ext->getOperand();
10264 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
10265 // the constant in some cases.
10266 return S;
10267 };
10268
10269 // Acquire values from extensions.
10270 auto *OrigLHS = LHS;
10271 auto *OrigFoundLHS = FoundLHS;
10272 LHS = GetOpFromSExt(LHS);
10273 FoundLHS = GetOpFromSExt(FoundLHS);
10274
10275 // Checks whether the SGT predicate can be proved trivially or using the found context.
10276 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
10277 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
10278 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
10279 FoundRHS, Depth + 1);
10280 };
10281
10282 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
10283 // We want to avoid creation of any new non-constant SCEV. Since we are
10284 // going to compare the operands to RHS, we should be certain that we don't
10285 // need any size extensions for this. So let's decline all cases when the
10286 // sizes of types of LHS and RHS do not match.
10287 // TODO: Maybe try to get RHS from sext to catch more cases?
10288 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
10289 return false;
10290
10291 // Should not overflow.
10292 if (!LHSAddExpr->hasNoSignedWrap())
10293 return false;
10294
10295 auto *LL = LHSAddExpr->getOperand(0);
10296 auto *LR = LHSAddExpr->getOperand(1);
10297 auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));
10298
10299 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
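// E.g. (illustrative): with RHS = 5, S1 = %n known non-negative and S2
// known s> 5, the no-signed-wrap sum S1 + S2 must itself be s> 5, because
// adding a non-negative value cannot decrease it.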
10300 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) { 10301 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS); 10302 }; 10303 // Try to prove the following rule: 10304 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS). 10305 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS). 10306 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL)) 10307 return true; 10308 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) { 10309 Value *LL, *LR; 10310 // FIXME: Once we have SDiv implemented, we can get rid of this matching. 10311 10312 using namespace llvm::PatternMatch; 10313 10314 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) { 10315 // Rules for division. 10316 // We are going to perform some comparisons with Denominator and its 10317 // derivative expressions. In general case, creating a SCEV for it may 10318 // lead to a complex analysis of the entire graph, and in particular it 10319 // can request trip count recalculation for the same loop. This would 10320 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid 10321 // this, we only want to create SCEVs that are constants in this section. 10322 // So we bail if Denominator is not a constant. 10323 if (!isa<ConstantInt>(LR)) 10324 return false; 10325 10326 auto *Denominator = cast<SCEVConstant>(getSCEV(LR)); 10327 10328 // We want to make sure that LHS = FoundLHS / Denominator. If it is so, 10329 // then a SCEV for the numerator already exists and matches with FoundLHS. 10330 auto *Numerator = getExistingSCEV(LL); 10331 if (!Numerator || Numerator->getType() != FoundLHS->getType()) 10332 return false; 10333 10334 // Make sure that the numerator matches with FoundLHS and the denominator 10335 // is positive. 10336 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator)) 10337 return false; 10338 10339 auto *DTy = Denominator->getType(); 10340 auto *FRHSTy = FoundRHS->getType(); 10341 if (DTy->isPointerTy() != FRHSTy->isPointerTy()) 10342 // One of types is a pointer and another one is not. We cannot extend 10343 // them properly to a wider type, so let us just reject this case. 10344 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help 10345 // to avoid this check. 10346 return false; 10347 10348 // Given that: 10349 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0. 10350 auto *WTy = getWiderType(DTy, FRHSTy); 10351 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy); 10352 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy); 10353 10354 // Try to prove the following rule: 10355 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS). 10356 // For example, given that FoundLHS > 2. It means that FoundLHS is at 10357 // least 3. If we divide it by Denominator < 4, we will have at least 1. 10358 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2)); 10359 if (isKnownNonPositive(RHS) && 10360 IsSGTViaContext(FoundRHSExt, DenomMinusTwo)) 10361 return true; 10362 10363 // Try to prove the following rule: 10364 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS). 10365 // For example, given that FoundLHS > -3. Then FoundLHS is at least -2. 10366 // If we divide it by Denominator > 2, then: 10367 // 1. If FoundLHS is negative, then the result is 0. 10368 // 2. If FoundLHS is non-negative, then the result is non-negative. 10369 // Anyways, the result is non-negative. 
10370 auto *MinusOne = getNegativeSCEV(getOne(WTy)); 10371 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt); 10372 if (isKnownNegative(RHS) && 10373 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne)) 10374 return true; 10375 } 10376 } 10377 10378 // If our expression contained SCEVUnknown Phis, and we split it down and now 10379 // need to prove something for them, try to prove the predicate for every 10380 // possible incoming values of those Phis. 10381 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1)) 10382 return true; 10383 10384 return false; 10385 } 10386 10387 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, 10388 const SCEV *LHS, const SCEV *RHS) { 10389 // zext x u<= sext x, sext x s<= zext x 10390 switch (Pred) { 10391 case ICmpInst::ICMP_SGE: 10392 std::swap(LHS, RHS); 10393 LLVM_FALLTHROUGH; 10394 case ICmpInst::ICMP_SLE: { 10395 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt. 10396 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS); 10397 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS); 10398 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10399 return true; 10400 break; 10401 } 10402 case ICmpInst::ICMP_UGE: 10403 std::swap(LHS, RHS); 10404 LLVM_FALLTHROUGH; 10405 case ICmpInst::ICMP_ULE: { 10406 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt. 10407 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS); 10408 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS); 10409 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand()) 10410 return true; 10411 break; 10412 } 10413 default: 10414 break; 10415 }; 10416 return false; 10417 } 10418 10419 bool 10420 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred, 10421 const SCEV *LHS, const SCEV *RHS) { 10422 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) || 10423 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) || 10424 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) || 10425 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) || 10426 isKnownPredicateViaNoOverflow(Pred, LHS, RHS); 10427 } 10428 10429 bool 10430 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 10431 const SCEV *LHS, const SCEV *RHS, 10432 const SCEV *FoundLHS, 10433 const SCEV *FoundRHS) { 10434 switch (Pred) { 10435 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 10436 case ICmpInst::ICMP_EQ: 10437 case ICmpInst::ICMP_NE: 10438 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 10439 return true; 10440 break; 10441 case ICmpInst::ICMP_SLT: 10442 case ICmpInst::ICMP_SLE: 10443 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 10444 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 10445 return true; 10446 break; 10447 case ICmpInst::ICMP_SGT: 10448 case ICmpInst::ICMP_SGE: 10449 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 10450 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 10451 return true; 10452 break; 10453 case ICmpInst::ICMP_ULT: 10454 case ICmpInst::ICMP_ULE: 10455 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 10456 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 10457 return true; 10458 break; 10459 case ICmpInst::ICMP_UGT: 10460 case ICmpInst::ICMP_UGE: 10461 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 10462 
isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
10463 return true;
10464 break;
10465 }
10466
10467 // Maybe it can be proved via operations?
10468 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
10469 return true;
10470
10471 return false;
10472 }
10473
10474 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
10475 const SCEV *LHS,
10476 const SCEV *RHS,
10477 const SCEV *FoundLHS,
10478 const SCEV *FoundRHS) {
10479 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
10480 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
10481 // reduce the compile time impact of this optimization.
10482 return false;
10483
10484 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
10485 if (!Addend)
10486 return false;
10487
10488 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
10489
10490 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
10491 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
10492 ConstantRange FoundLHSRange =
10493 ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
10494
10495 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
10496 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
10497
10498 // We can also compute the range of values for `LHS` that satisfy the
10499 // consequent, "`LHS` `Pred` `RHS`":
10500 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
10501 ConstantRange SatisfyingLHSRange =
10502 ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
10503
10504 // The antecedent implies the consequent if every value of `LHS` that
10505 // satisfies the antecedent also satisfies the consequent.
10506 return SatisfyingLHSRange.contains(LHSRange);
10507 }
10508
10509 bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
10510 bool IsSigned, bool NoWrap) {
10511 assert(isKnownPositive(Stride) && "Positive stride expected!");
10512
10513 if (NoWrap) return false;
10514
10515 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10516 const SCEV *One = getOne(Stride->getType());
10517
10518 if (IsSigned) {
10519 APInt MaxRHS = getSignedRangeMax(RHS);
10520 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
10521 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10522
10523 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
10524 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
10525 }
10526
10527 APInt MaxRHS = getUnsignedRangeMax(RHS);
10528 APInt MaxValue = APInt::getMaxValue(BitWidth);
10529 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
10530
10531 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
10532 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
10533 }
10534
10535 bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
10536 bool IsSigned, bool NoWrap) {
10537 if (NoWrap) return false;
10538
10539 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
10540 const SCEV *One = getOne(Stride->getType());
10541
10542 if (IsSigned) {
10543 APInt MinRHS = getSignedRangeMin(RHS);
10544 APInt MinValue = APInt::getSignedMinValue(BitWidth);
10545 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
10546
10547 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
10548 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 10549 } 10550 10551 APInt MinRHS = getUnsignedRangeMin(RHS); 10552 APInt MinValue = APInt::getMinValue(BitWidth); 10553 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 10554 10555 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 10556 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 10557 } 10558 10559 const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step, 10560 bool Equality) { 10561 const SCEV *One = getOne(Step->getType()); 10562 Delta = Equality ? getAddExpr(Delta, Step) 10563 : getAddExpr(Delta, getMinusSCEV(Step, One)); 10564 return getUDivExpr(Delta, Step); 10565 } 10566 10567 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 10568 const SCEV *Stride, 10569 const SCEV *End, 10570 unsigned BitWidth, 10571 bool IsSigned) { 10572 10573 assert(!isKnownNonPositive(Stride) && 10574 "Stride is expected strictly positive!"); 10575 // Calculate the maximum backedge count based on the range of values 10576 // permitted by Start, End, and Stride. 10577 const SCEV *MaxBECount; 10578 APInt MinStart = 10579 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start); 10580 10581 APInt StrideForMaxBECount = 10582 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride); 10583 10584 // We already know that the stride is positive, so we paper over conservatism 10585 // in our range computation by forcing StrideForMaxBECount to be at least one. 10586 // In theory this is unnecessary, but we expect MaxBECount to be a 10587 // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there 10588 // is nothing to constant fold it to). 10589 APInt One(BitWidth, 1, IsSigned); 10590 StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount); 10591 10592 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth) 10593 : APInt::getMaxValue(BitWidth); 10594 APInt Limit = MaxValue - (StrideForMaxBECount - 1); 10595 10596 // Although End can be a MAX expression we estimate MaxEnd considering only 10597 // the case End = RHS of the loop termination condition. This is safe because 10598 // in the other case (End - Start) is zero, leading to a zero maximum backedge 10599 // taken count. 10600 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit) 10601 : APIntOps::umin(getUnsignedRangeMax(End), Limit); 10602 10603 MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */, 10604 getConstant(StrideForMaxBECount) /* Step */, 10605 false /* Equality */); 10606 10607 return MaxBECount; 10608 } 10609 10610 ScalarEvolution::ExitLimit 10611 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS, 10612 const Loop *L, bool IsSigned, 10613 bool ControlsExit, bool AllowPredicates) { 10614 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10615 10616 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10617 bool PredicatedIV = false; 10618 10619 if (!IV && AllowPredicates) { 10620 // Try to make this an AddRec using runtime tests, in the first X 10621 // iterations of this loop, where X is the SCEV expression found by the 10622 // algorithm below. 10623 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10624 PredicatedIV = true; 10625 } 10626 10627 // Avoid weird loops 10628 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10629 return getCouldNotCompute(); 10630 10631 bool NoWrap = ControlsExit && 10632 IV->getNoWrapFlags(IsSigned ? 
SCEV::FlagNSW : SCEV::FlagNUW); 10633 10634 const SCEV *Stride = IV->getStepRecurrence(*this); 10635 10636 bool PositiveStride = isKnownPositive(Stride); 10637 10638 // Avoid negative or zero stride values. 10639 if (!PositiveStride) { 10640 // We can compute the correct backedge taken count for loops with unknown 10641 // strides if we can prove that the loop is not an infinite loop with side 10642 // effects. Here's the loop structure we are trying to handle - 10643 // 10644 // i = start 10645 // do { 10646 // A[i] = i; 10647 // i += s; 10648 // } while (i < end); 10649 // 10650 // The backedge taken count for such loops is evaluated as - 10651 // (max(end, start + stride) - start - 1) /u stride 10652 // 10653 // The additional preconditions that we need to check to prove correctness 10654 // of the above formula are as follows - 10655 // 10656 // a) IV is either nuw or nsw depending upon signedness (indicated by the 10657 // NoWrap flag). 10658 // b) the loop is single-exit with no side effects. 10659 // 10660 // 10661 // Precondition a) implies that if the stride is negative, this is a single 10662 // trip loop. The backedge taken count formula reduces to zero in this case. 10663 // 10664 // Precondition b) implies that the unknown stride cannot be zero, otherwise 10665 // we have UB. 10666 // 10667 // The positive stride case is the same as isKnownPositive(Stride) returning 10668 // true (original behavior of the function). 10669 // 10670 // We want to make sure that the stride is truly unknown as there are edge 10671 // cases where ScalarEvolution propagates no wrap flags to the 10672 // post-increment/decrement IV even though the increment/decrement operation 10673 // itself is wrapping. The computed backedge taken count may be wrong in 10674 // such cases. This is prevented by checking that the stride is not known to 10675 // be either positive or non-positive. For example, no wrap flags are 10676 // propagated to the post-increment IV of this loop with a trip count of 2 - 10677 // 10678 // unsigned char i; 10679 // for(i=127; i<128; i+=129) 10680 // A[i] = i; 10681 // 10682 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) || 10683 !loopHasNoSideEffects(L)) 10684 return getCouldNotCompute(); 10685 } else if (!Stride->isOne() && 10686 doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap)) 10687 // Avoid proven overflow cases: this will ensure that the backedge taken 10688 // count will not generate any unsigned overflow. Relaxed no-overflow 10689 // conditions exploit NoWrapFlags, allowing us to optimize in the presence 10690 // of undefined behavior, as in the C language. 10691 return getCouldNotCompute(); 10692 10693 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT 10694 : ICmpInst::ICMP_ULT; 10695 const SCEV *Start = IV->getStart(); 10696 const SCEV *End = RHS; 10697 // When the RHS is not invariant, we do not know the end bound of the loop and 10698 // cannot calculate the ExactBECount needed by ExitLimit. However, we can 10699 // calculate the MaxBECount, given the start, stride and max value for the end 10700 // bound of the loop (RHS), and the fact that IV does not overflow (which is 10701 // checked above).
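// For instance, if the exit test is "i < *p" and *p is reloaded on every
// iteration, the exact count is unknown, but the range of the loaded value
// still bounds the iteration count from above.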
10702 if (!isLoopInvariant(RHS, L)) { 10703 const SCEV *MaxBECount = computeMaxBECountForLT( 10704 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10705 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount, 10706 false /*MaxOrZero*/, Predicates); 10707 } 10708 // If the backedge is taken at least once, then it will be taken 10709 // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start 10710 // is the LHS value of the less-than comparison the first time it is evaluated 10711 // and End is the RHS. 10712 const SCEV *BECountIfBackedgeTaken = 10713 computeBECount(getMinusSCEV(End, Start), Stride, false); 10714 // If the loop entry is guarded by the result of the backedge test of the 10715 // first loop iteration, then we know the backedge will be taken at least 10716 // once and so the backedge taken count is as above. If not then we use the 10717 // expression (max(End,Start)-Start)/Stride to describe the backedge count, 10718 // as if the backedge is taken at least once max(End,Start) is End and so the 10719 // result is as above, and if not max(End,Start) is Start so we get a backedge 10720 // count of zero. 10721 const SCEV *BECount; 10722 if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS)) 10723 BECount = BECountIfBackedgeTaken; 10724 else { 10725 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start); 10726 BECount = computeBECount(getMinusSCEV(End, Start), Stride, false); 10727 } 10728 10729 const SCEV *MaxBECount; 10730 bool MaxOrZero = false; 10731 if (isa<SCEVConstant>(BECount)) 10732 MaxBECount = BECount; 10733 else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) { 10734 // If we know exactly how many times the backedge will be taken if it's 10735 // taken at least once, then the backedge count will either be that or 10736 // zero. 10737 MaxBECount = BECountIfBackedgeTaken; 10738 MaxOrZero = true; 10739 } else { 10740 MaxBECount = computeMaxBECountForLT( 10741 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 10742 } 10743 10744 if (isa<SCEVCouldNotCompute>(MaxBECount) && 10745 !isa<SCEVCouldNotCompute>(BECount)) 10746 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 10747 10748 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 10749 } 10750 10751 ScalarEvolution::ExitLimit 10752 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 10753 const Loop *L, bool IsSigned, 10754 bool ControlsExit, bool AllowPredicates) { 10755 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 10756 // We handle only IV > Invariant 10757 if (!isLoopInvariant(RHS, L)) 10758 return getCouldNotCompute(); 10759 10760 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 10761 if (!IV && AllowPredicates) 10762 // Try to make this an AddRec using runtime tests, in the first X 10763 // iterations of this loop, where X is the SCEV expression found by the 10764 // algorithm below. 10765 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 10766 10767 // Avoid weird loops 10768 if (!IV || IV->getLoop() != L || !IV->isAffine()) 10769 return getCouldNotCompute(); 10770 10771 bool NoWrap = ControlsExit && 10772 IV->getNoWrapFlags(IsSigned ? 
SCEV::FlagNSW : SCEV::FlagNUW); 10773 10774 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 10775 10776 // Avoid negative or zero stride values. 10777 if (!isKnownPositive(Stride)) 10778 return getCouldNotCompute(); 10779 10780 // Avoid proven overflow cases: this will ensure that the backedge taken count 10781 // will not generate any unsigned overflow. Relaxed no-overflow conditions 10782 // exploit NoWrapFlags, allowing us to optimize in the presence of undefined 10783 // behavior, as in the C language. 10784 if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap)) 10785 return getCouldNotCompute(); 10786 10787 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT 10788 : ICmpInst::ICMP_UGT; 10789 10790 const SCEV *Start = IV->getStart(); 10791 const SCEV *End = RHS; 10792 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) 10793 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 10794 10795 const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false); 10796 10797 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 10798 : getUnsignedRangeMax(Start); 10799 10800 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 10801 : getUnsignedRangeMin(Stride); 10802 10803 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 10804 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 10805 : APInt::getMinValue(BitWidth) + (MinStride - 1); 10806 10807 // Although End can be a MIN expression we estimate MinEnd considering only 10808 // the case End = RHS. This is safe because in the other case (Start - End) 10809 // is zero, leading to a zero maximum backedge taken count. 10810 APInt MinEnd = 10811 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 10812 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 10813 10814 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 10815 ? BECount 10816 : computeBECount(getConstant(MaxStart - MinEnd), 10817 getConstant(MinStride), false); 10818 10819 if (isa<SCEVCouldNotCompute>(MaxBECount)) 10820 MaxBECount = BECount; 10821 10822 return ExitLimit(BECount, MaxBECount, false, Predicates); 10823 } 10824 10825 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 10826 ScalarEvolution &SE) const { 10827 if (Range.isFullSet()) // Infinite loop. 10828 return SE.getCouldNotCompute(); 10829 10830 // If the start is a non-zero constant, shift the range to simplify things. 10831 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 10832 if (!SC->getValue()->isZero()) { 10833 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 10834 Operands[0] = SE.getZero(SC->getType()); 10835 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 10836 getNoWrapFlags(FlagNW)); 10837 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 10838 return ShiftedAddRec->getNumIterationsInRange( 10839 Range.subtract(SC->getAPInt()), SE); 10840 // This is strange and shouldn't happen. 10841 return SE.getCouldNotCompute(); 10842 } 10843 10844 // The only time we can solve this is when we have all constant indices. 10845 // Otherwise, we cannot determine the overflow conditions. 10846 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 10847 return SE.getCouldNotCompute(); 10848 10849 // Okay, at this point we know that all elements of the chrec are constants and 10850 // that the start element is zero. 10851 10852 // First check to see if the range contains zero.
If not, the first 10853 // iteration exits. 10854 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 10855 if (!Range.contains(APInt(BitWidth, 0))) 10856 return SE.getZero(getType()); 10857 10858 if (isAffine()) { 10859 // If this is an affine expression then we have this situation: 10860 // Solve {0,+,A} in Range === Ax in Range 10861 10862 // We know that zero is in the range. If A is positive then we know that 10863 // the upper value of the range must be the first possible exit value. 10864 // If A is negative then the lower of the range is the last possible loop 10865 // value. Also note that we already checked for a full range. 10866 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt(); 10867 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower(); 10868 10869 // The exit value should be (End+A)/A. 10870 APInt ExitVal = (End + A).udiv(A); 10871 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 10872 10873 // Evaluate at the exit value. If we really did fall out of the valid 10874 // range, then we computed our trip count, otherwise wrap around or other 10875 // things must have happened. 10876 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 10877 if (Range.contains(Val->getValue())) 10878 return SE.getCouldNotCompute(); // Something strange happened 10879 10880 // Ensure that the previous value is in the range. This is a sanity check. 10881 assert(Range.contains( 10882 EvaluateConstantChrecAtConstant(this, 10883 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 10884 "Linear scev computation is off in a bad way!"); 10885 return SE.getConstant(ExitValue); 10886 } 10887 10888 if (isQuadratic()) { 10889 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 10890 return SE.getConstant(S.getValue()); 10891 } 10892 10893 return SE.getCouldNotCompute(); 10894 } 10895 10896 const SCEVAddRecExpr * 10897 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 10898 assert(getNumOperands() > 1 && "AddRec with zero step?"); 10899 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 10900 // but in this case we cannot guarantee that the value returned will be an 10901 // AddRec because SCEV does not have a fixed point where it stops 10902 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 10903 // may happen if we reach arithmetic depth limit while simplifying. So we 10904 // construct the returned value explicitly. 10905 SmallVector<const SCEV *, 3> Ops; 10906 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 10907 // (this + Step) is {A+B,+,B+C,+...,+,N}. 10908 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 10909 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 10910 // We know that the last operand is not a constant zero (otherwise it would 10911 // have been popped out earlier). This guarantees us that if the result has 10912 // the same last operand, then it will also not be popped out, meaning that 10913 // the returned value will be an AddRec. 10914 const SCEV *Last = getOperand(getNumOperands() - 1); 10915 assert(!Last->isZero() && "Recurrency with zero step?"); 10916 Ops.push_back(Last); 10917 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 10918 SCEV::FlagAnyWrap)); 10919 } 10920 10921 // Return true when S contains at least an undef value. 
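// For example, a SCEVUnknown wrapping an i64 undef makes this return true;
// the term collectors below use this to skip undef-containing expressions.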
10922 static inline bool containsUndefs(const SCEV *S) { 10923 return SCEVExprContains(S, [](const SCEV *S) { 10924 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 10925 return isa<UndefValue>(SU->getValue()); 10926 return false; 10927 }); 10928 } 10929 10930 namespace { 10931 10932 // Collect all steps of SCEV expressions. 10933 struct SCEVCollectStrides { 10934 ScalarEvolution &SE; 10935 SmallVectorImpl<const SCEV *> &Strides; 10936 10937 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 10938 : SE(SE), Strides(S) {} 10939 10940 bool follow(const SCEV *S) { 10941 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 10942 Strides.push_back(AR->getStepRecurrence(SE)); 10943 return true; 10944 } 10945 10946 bool isDone() const { return false; } 10947 }; 10948 10949 // Collect all SCEVUnknown and SCEVMulExpr expressions. 10950 struct SCEVCollectTerms { 10951 SmallVectorImpl<const SCEV *> &Terms; 10952 10953 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 10954 10955 bool follow(const SCEV *S) { 10956 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 10957 isa<SCEVSignExtendExpr>(S)) { 10958 if (!containsUndefs(S)) 10959 Terms.push_back(S); 10960 10961 // Stop recursion: once we collected a term, do not walk its operands. 10962 return false; 10963 } 10964 10965 // Keep looking. 10966 return true; 10967 } 10968 10969 bool isDone() const { return false; } 10970 }; 10971 10972 // Check if a SCEV contains an AddRecExpr. 10973 struct SCEVHasAddRec { 10974 bool &ContainsAddRec; 10975 10976 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 10977 ContainsAddRec = false; 10978 } 10979 10980 bool follow(const SCEV *S) { 10981 if (isa<SCEVAddRecExpr>(S)) { 10982 ContainsAddRec = true; 10983 10984 // Stop recursion: once we collected a term, do not walk its operands. 10985 return false; 10986 } 10987 10988 // Keep looking. 10989 return true; 10990 } 10991 10992 bool isDone() const { return false; } 10993 }; 10994 10995 // Find factors that are multiplied with an expression that (possibly as a 10996 // subexpression) contains an AddRecExpr. In the expression: 10997 // 10998 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 10999 // 11000 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 11001 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 11002 // parameters as they form a product with an induction variable. 11003 // 11004 // This collector expects all array size parameters to be in the same MulExpr. 11005 // It might be necessary to later add support for collecting parameters that are 11006 // spread over different nested MulExpr. 
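// On the expression above, the outer MulExpr (8 * ...) has no SCEVUnknown
// operands, so the walk descends into it; at the inner MulExpr the collector
// keeps %p and %q as Operands, finds the AddRec inside the remaining
// operand, and pushes (%p * %q) into Terms.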
11007 struct SCEVCollectAddRecMultiplies { 11008 SmallVectorImpl<const SCEV *> &Terms; 11009 ScalarEvolution &SE; 11010 11011 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) 11012 : Terms(T), SE(SE) {} 11013 11014 bool follow(const SCEV *S) { 11015 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { 11016 bool HasAddRec = false; 11017 SmallVector<const SCEV *, 0> Operands; 11018 for (auto Op : Mul->operands()) { 11019 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op); 11020 if (Unknown && !isa<CallInst>(Unknown->getValue())) { 11021 Operands.push_back(Op); 11022 } else if (Unknown) { 11023 HasAddRec = true; 11024 } else { 11025 // Walk the operand to see if it contains an AddRec anywhere. 11026 bool ContainsAddRec = false; 11027 SCEVHasAddRec HasAddRecVisitor(ContainsAddRec); 11028 visitAll(Op, HasAddRecVisitor); 11029 HasAddRec |= ContainsAddRec; 11030 } 11031 } 11032 if (Operands.size() == 0) 11033 return true; 11034 11035 if (!HasAddRec) 11036 return false; 11037 11038 Terms.push_back(SE.getMulExpr(Operands)); 11039 // Stop recursion: once we collected a term, do not walk its operands. 11040 return false; 11041 } 11042 11043 // Keep looking. 11044 return true; 11045 } 11046 11047 bool isDone() const { return false; } 11048 }; 11049 11050 } // end anonymous namespace 11051 /// Find parametric terms in this SCEVAddRecExpr. We first look for parameters 11052 /// in two places: 11053 /// 1) The strides of AddRec expressions. 11054 /// 2) Unknowns that are multiplied with AddRec expressions. 11055 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11056 SmallVectorImpl<const SCEV *> &Terms) { 11057 SmallVector<const SCEV *, 4> Strides; 11058 SCEVCollectStrides StrideCollector(*this, Strides); 11059 visitAll(Expr, StrideCollector); 11060 11061 LLVM_DEBUG({ 11062 dbgs() << "Strides:\n"; 11063 for (const SCEV *S : Strides) 11064 dbgs() << *S << "\n"; 11065 }); 11066 11067 for (const SCEV *S : Strides) { 11068 SCEVCollectTerms TermCollector(Terms); 11069 visitAll(S, TermCollector); 11070 } 11071 11072 LLVM_DEBUG({ 11073 dbgs() << "Terms:\n"; 11074 for (const SCEV *T : Terms) 11075 dbgs() << *T << "\n"; 11076 }); 11077 11078 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11079 visitAll(Expr, MulCollector); 11080 } 11081 11082 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11083 SmallVectorImpl<const SCEV *> &Terms, 11084 SmallVectorImpl<const SCEV *> &Sizes) { 11085 int Last = Terms.size() - 1; 11086 const SCEV *Step = Terms[Last]; 11087 11088 // End of recursion. 11089 if (Last == 0) { 11090 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11091 SmallVector<const SCEV *, 2> Qs; 11092 for (const SCEV *Op : M->operands()) 11093 if (!isa<SCEVConstant>(Op)) 11094 Qs.push_back(Op); 11095 11096 Step = SE.getMulExpr(Qs); 11097 } 11098 11099 Sizes.push_back(Step); 11100 return true; 11101 } 11102 11103 for (const SCEV *&Term : Terms) { 11104 // Normalize the terms before the next call to findArrayDimensionsRec. 11105 const SCEV *Q, *R; 11106 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11107 11108 // Bail out when GCD does not evenly divide one of the terms. 11109 if (!R->isZero()) 11110 return false; 11111 11112 Term = Q; 11113 } 11114 11115 // Remove all SCEVConstants.
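// Terms that divided out exactly become the constant 1 and are erased here.
// For example, with Terms = {%m * %o, %o}, the step %o divides both terms,
// leaving {%m, 1}; the 1 is erased, the recursion pushes %m, and then %o is
// pushed, giving Sizes = {%m, %o}.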
11116 Terms.erase( 11117 remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }), 11118 Terms.end()); 11119 11120 if (Terms.size() > 0) 11121 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11122 return false; 11123 11124 Sizes.push_back(Step); 11125 return true; 11126 } 11127 11128 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11129 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11130 for (const SCEV *T : Terms) 11131 if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>)) 11132 return true; 11133 return false; 11134 } 11135 11136 // Return the number of product terms in S. 11137 static inline int numberOfTerms(const SCEV *S) { 11138 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11139 return Expr->getNumOperands(); 11140 return 1; 11141 } 11142 11143 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11144 if (isa<SCEVConstant>(T)) 11145 return nullptr; 11146 11147 if (isa<SCEVUnknown>(T)) 11148 return T; 11149 11150 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11151 SmallVector<const SCEV *, 2> Factors; 11152 for (const SCEV *Op : M->operands()) 11153 if (!isa<SCEVConstant>(Op)) 11154 Factors.push_back(Op); 11155 11156 return SE.getMulExpr(Factors); 11157 } 11158 11159 return T; 11160 } 11161 11162 /// Return the size of an element read or written by Inst. 11163 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11164 Type *Ty; 11165 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11166 Ty = Store->getValueOperand()->getType(); 11167 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11168 Ty = Load->getType(); 11169 else 11170 return nullptr; 11171 11172 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11173 return getSizeOfExpr(ETy, Ty); 11174 } 11175 11176 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11177 SmallVectorImpl<const SCEV *> &Sizes, 11178 const SCEV *ElementSize) { 11179 if (Terms.size() < 1 || !ElementSize) 11180 return; 11181 11182 // Early return when Terms do not contain parameters: we do not delinearize 11183 // non parametric SCEVs. 11184 if (!containsParameters(Terms)) 11185 return; 11186 11187 LLVM_DEBUG({ 11188 dbgs() << "Terms:\n"; 11189 for (const SCEV *T : Terms) 11190 dbgs() << *T << "\n"; 11191 }); 11192 11193 // Remove duplicates. 11194 array_pod_sort(Terms.begin(), Terms.end()); 11195 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 11196 11197 // Put larger terms first. 11198 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 11199 return numberOfTerms(LHS) > numberOfTerms(RHS); 11200 }); 11201 11202 // Try to divide all terms by the element size. If term is not divisible by 11203 // element size, proceed with the original term. 11204 for (const SCEV *&Term : Terms) { 11205 const SCEV *Q, *R; 11206 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 11207 if (!Q->isZero()) 11208 Term = Q; 11209 } 11210 11211 SmallVector<const SCEV *, 4> NewTerms; 11212 11213 // Remove constant factors. 
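// For example, 8 * %m becomes %m, a plain %p is kept as-is, and a term that
// is a lone constant is dropped entirely (removeConstantFactors returns
// nullptr for it).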
11214 for (const SCEV *T : Terms) 11215 if (const SCEV *NewT = removeConstantFactors(*this, T)) 11216 NewTerms.push_back(NewT); 11217 11218 LLVM_DEBUG({ 11219 dbgs() << "Terms after sorting:\n"; 11220 for (const SCEV *T : NewTerms) 11221 dbgs() << *T << "\n"; 11222 }); 11223 11224 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 11225 Sizes.clear(); 11226 return; 11227 } 11228 11229 // The last element to be pushed into Sizes is the size of an element. 11230 Sizes.push_back(ElementSize); 11231 11232 LLVM_DEBUG({ 11233 dbgs() << "Sizes:\n"; 11234 for (const SCEV *S : Sizes) 11235 dbgs() << *S << "\n"; 11236 }); 11237 } 11238 11239 void ScalarEvolution::computeAccessFunctions( 11240 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 11241 SmallVectorImpl<const SCEV *> &Sizes) { 11242 // Early exit in case this SCEV is not an affine multivariate function. 11243 if (Sizes.empty()) 11244 return; 11245 11246 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 11247 if (!AR->isAffine()) 11248 return; 11249 11250 const SCEV *Res = Expr; 11251 int Last = Sizes.size() - 1; 11252 for (int i = Last; i >= 0; i--) { 11253 const SCEV *Q, *R; 11254 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 11255 11256 LLVM_DEBUG({ 11257 dbgs() << "Res: " << *Res << "\n"; 11258 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 11259 dbgs() << "Res divided by Sizes[i]:\n"; 11260 dbgs() << "Quotient: " << *Q << "\n"; 11261 dbgs() << "Remainder: " << *R << "\n"; 11262 }); 11263 11264 Res = Q; 11265 11266 // Do not record the last subscript corresponding to the size of elements in 11267 // the array. 11268 if (i == Last) { 11269 11270 // Bail out if the remainder is too complex. 11271 if (isa<SCEVAddRecExpr>(R)) { 11272 Subscripts.clear(); 11273 Sizes.clear(); 11274 return; 11275 } 11276 11277 continue; 11278 } 11279 11280 // Record the access function for the current subscript. 11281 Subscripts.push_back(R); 11282 } 11283 11284 // Also push in last position the remainder of the last division: it will be 11285 // the access function of the innermost dimension. 11286 Subscripts.push_back(Res); 11287 11288 std::reverse(Subscripts.begin(), Subscripts.end()); 11289 11290 LLVM_DEBUG({ 11291 dbgs() << "Subscripts:\n"; 11292 for (const SCEV *S : Subscripts) 11293 dbgs() << *S << "\n"; 11294 }); 11295 } 11296 11297 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 11298 /// sizes of an array access; the remainder of the delinearization is the 11299 /// offset of the start of the array. The SCEV->delinearize algorithm computes 11300 /// the multiples of SCEV coefficients: that is a pattern matching of 11301 /// subexpressions in the stride and base of a SCEV corresponding to the 11302 /// computation of a GCD (greatest common divisor) of base and stride. When 11303 /// SCEV->delinearize fails, it returns the SCEV unchanged.
11304 /// 11305 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 11306 /// 11307 /// void foo(long n, long m, long o, double A[n][m][o]) { 11308 /// 11309 /// for (long i = 0; i < n; i++) 11310 /// for (long j = 0; j < m; j++) 11311 /// for (long k = 0; k < o; k++) 11312 /// A[i][j][k] = 1.0; 11313 /// } 11314 /// 11315 /// the delinearization input is the following AddRec SCEV: 11316 /// 11317 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 11318 /// 11319 /// From this SCEV, we are able to say that the base offset of the access is %A 11320 /// because it appears as an offset that does not divide any of the strides in 11321 /// the loops: 11322 /// 11323 /// CHECK: Base offset: %A 11324 /// 11325 /// and then SCEV->delinearize determines the size of some of the dimensions of 11326 /// the array as these are the multiples by which the strides are happening: 11327 /// 11328 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 11329 /// 11330 /// Note that the outermost dimension remains of UnknownSize because there are 11331 /// no strides that would help identifying the size of the last dimension: when 11332 /// the array has been statically allocated, one could compute the size of that 11333 /// dimension by dividing the overall size of the array by the size of the known 11334 /// dimensions: %m * %o * 8. 11335 /// 11336 /// Finally delinearize provides the access functions for the array reference 11337 /// that does correspond to A[i][j][k] of the above C testcase: 11338 /// 11339 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 11340 /// 11341 /// The testcases are checking the output of a function pass: 11342 /// DelinearizationPass that walks through all loads and stores of a function 11343 /// asking for the SCEV of the memory access with respect to all enclosing 11344 /// loops, calling SCEV->delinearize on that and printing the results. 11345 void ScalarEvolution::delinearize(const SCEV *Expr, 11346 SmallVectorImpl<const SCEV *> &Subscripts, 11347 SmallVectorImpl<const SCEV *> &Sizes, 11348 const SCEV *ElementSize) { 11349 // First step: collect parametric terms. 11350 SmallVector<const SCEV *, 4> Terms; 11351 collectParametricTerms(Expr, Terms); 11352 11353 if (Terms.empty()) 11354 return; 11355 11356 // Second step: find subscript sizes. 11357 findArrayDimensions(Terms, Sizes, ElementSize); 11358 11359 if (Sizes.empty()) 11360 return; 11361 11362 // Third step: compute the access functions for each subscript. 
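// For the A[i][j][k] example in the function comment above, the three steps
// yield Sizes = {%m, %o, 8} and Subscripts = {{0,+,1}<%for.i>,
// {0,+,1}<%for.j>, {0,+,1}<%for.k>}.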
11363 computeAccessFunctions(Expr, Subscripts, Sizes); 11364 11365 if (Subscripts.empty()) 11366 return; 11367 11368 LLVM_DEBUG({ 11369 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 11370 dbgs() << "ArrayDecl[UnknownSize]"; 11371 for (const SCEV *S : Sizes) 11372 dbgs() << "[" << *S << "]"; 11373 11374 dbgs() << "\nArrayRef"; 11375 for (const SCEV *S : Subscripts) 11376 dbgs() << "[" << *S << "]"; 11377 dbgs() << "\n"; 11378 }); 11379 } 11380 11381 //===----------------------------------------------------------------------===// 11382 // SCEVCallbackVH Class Implementation 11383 //===----------------------------------------------------------------------===// 11384 11385 void ScalarEvolution::SCEVCallbackVH::deleted() { 11386 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11387 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 11388 SE->ConstantEvolutionLoopExitValue.erase(PN); 11389 SE->eraseValueFromMap(getValPtr()); 11390 // this now dangles! 11391 } 11392 11393 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 11394 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 11395 11396 // Forget all the expressions associated with users of the old value, 11397 // so that future queries will recompute the expressions using the new 11398 // value. 11399 Value *Old = getValPtr(); 11400 SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end()); 11401 SmallPtrSet<User *, 8> Visited; 11402 while (!Worklist.empty()) { 11403 User *U = Worklist.pop_back_val(); 11404 // Deleting the Old value will cause this to dangle. Postpone 11405 // that until everything else is done. 11406 if (U == Old) 11407 continue; 11408 if (!Visited.insert(U).second) 11409 continue; 11410 if (PHINode *PN = dyn_cast<PHINode>(U)) 11411 SE->ConstantEvolutionLoopExitValue.erase(PN); 11412 SE->eraseValueFromMap(U); 11413 Worklist.insert(Worklist.end(), U->user_begin(), U->user_end()); 11414 } 11415 // Delete the Old value. 11416 if (PHINode *PN = dyn_cast<PHINode>(Old)) 11417 SE->ConstantEvolutionLoopExitValue.erase(PN); 11418 SE->eraseValueFromMap(Old); 11419 // this now dangles! 11420 } 11421 11422 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 11423 : CallbackVH(V), SE(se) {} 11424 11425 //===----------------------------------------------------------------------===// 11426 // ScalarEvolution Class Implementation 11427 //===----------------------------------------------------------------------===// 11428 11429 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 11430 AssumptionCache &AC, DominatorTree &DT, 11431 LoopInfo &LI) 11432 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 11433 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 11434 LoopDispositions(64), BlockDispositions(64) { 11435 // To use guards for proving predicates, we need to scan every instruction in 11436 // relevant basic blocks, and not just terminators. Doing this is a waste of 11437 // time if the IR does not actually contain any calls to 11438 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 11439 // 11440 // This pessimizes the case where a pass that preserves ScalarEvolution wants 11441 // to _add_ guards to the module when there weren't any before, and wants 11442 // ScalarEvolution to optimize based on those guards. For now we prefer to be 11443 // efficient in lieu of being smart in that rather obscure case. 
11444 11445 auto *GuardDecl = F.getParent()->getFunction( 11446 Intrinsic::getName(Intrinsic::experimental_guard)); 11447 HasGuards = GuardDecl && !GuardDecl->use_empty(); 11448 } 11449 11450 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 11451 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 11452 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 11453 ValueExprMap(std::move(Arg.ValueExprMap)), 11454 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 11455 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 11456 PendingMerges(std::move(Arg.PendingMerges)), 11457 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 11458 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 11459 PredicatedBackedgeTakenCounts( 11460 std::move(Arg.PredicatedBackedgeTakenCounts)), 11461 ConstantEvolutionLoopExitValue( 11462 std::move(Arg.ConstantEvolutionLoopExitValue)), 11463 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 11464 LoopDispositions(std::move(Arg.LoopDispositions)), 11465 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 11466 BlockDispositions(std::move(Arg.BlockDispositions)), 11467 UnsignedRanges(std::move(Arg.UnsignedRanges)), 11468 SignedRanges(std::move(Arg.SignedRanges)), 11469 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 11470 UniquePreds(std::move(Arg.UniquePreds)), 11471 SCEVAllocator(std::move(Arg.SCEVAllocator)), 11472 LoopUsers(std::move(Arg.LoopUsers)), 11473 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 11474 FirstUnknown(Arg.FirstUnknown) { 11475 Arg.FirstUnknown = nullptr; 11476 } 11477 11478 ScalarEvolution::~ScalarEvolution() { 11479 // Iterate through all the SCEVUnknown instances and call their 11480 // destructors, so that they release their references to their values. 11481 for (SCEVUnknown *U = FirstUnknown; U;) { 11482 SCEVUnknown *Tmp = U; 11483 U = U->Next; 11484 Tmp->~SCEVUnknown(); 11485 } 11486 FirstUnknown = nullptr; 11487 11488 ExprValueMap.clear(); 11489 ValueExprMap.clear(); 11490 HasRecMap.clear(); 11491 11492 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 11493 // that a loop had multiple computable exits. 
11494 for (auto &BTCI : BackedgeTakenCounts) 11495 BTCI.second.clear(); 11496 for (auto &BTCI : PredicatedBackedgeTakenCounts) 11497 BTCI.second.clear(); 11498 11499 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 11500 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 11501 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 11502 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 11503 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 11504 } 11505 11506 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 11507 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 11508 } 11509 11510 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 11511 const Loop *L) { 11512 // Print all inner loops first 11513 for (Loop *I : *L) 11514 PrintLoopInfo(OS, SE, I); 11515 11516 OS << "Loop "; 11517 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11518 OS << ": "; 11519 11520 SmallVector<BasicBlock *, 8> ExitingBlocks; 11521 L->getExitingBlocks(ExitingBlocks); 11522 if (ExitingBlocks.size() != 1) 11523 OS << "<multiple exits> "; 11524 11525 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 11526 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 11527 else 11528 OS << "Unpredictable backedge-taken count.\n"; 11529 11530 if (ExitingBlocks.size() > 1) 11531 for (BasicBlock *ExitingBlock : ExitingBlocks) { 11532 OS << " exit count for " << ExitingBlock->getName() << ": " 11533 << *SE->getExitCount(L, ExitingBlock) << "\n"; 11534 } 11535 11536 OS << "Loop "; 11537 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11538 OS << ": "; 11539 11540 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 11541 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 11542 if (SE->isBackedgeTakenCountMaxOrZero(L)) 11543 OS << ", actual taken count either this or zero."; 11544 } else { 11545 OS << "Unpredictable max backedge-taken count. "; 11546 } 11547 11548 OS << "\n" 11549 "Loop "; 11550 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11551 OS << ": "; 11552 11553 SCEVUnionPredicate Pred; 11554 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 11555 if (!isa<SCEVCouldNotCompute>(PBT)) { 11556 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 11557 OS << " Predicates:\n"; 11558 Pred.print(OS, 4); 11559 } else { 11560 OS << "Unpredictable predicated backedge-taken count. "; 11561 } 11562 OS << "\n"; 11563 11564 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 11565 OS << "Loop "; 11566 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11567 OS << ": "; 11568 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 11569 } 11570 } 11571 11572 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 11573 switch (LD) { 11574 case ScalarEvolution::LoopVariant: 11575 return "Variant"; 11576 case ScalarEvolution::LoopInvariant: 11577 return "Invariant"; 11578 case ScalarEvolution::LoopComputable: 11579 return "Computable"; 11580 } 11581 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 11582 } 11583 11584 void ScalarEvolution::print(raw_ostream &OS) const { 11585 // ScalarEvolution's implementation of the print method is to print 11586 // out SCEV values of all instructions that are interesting. Doing 11587 // this potentially causes it to create new SCEV objects though, 11588 // which technically conflicts with the const qualifier. 
This isn't 11589 // observable from outside the class though, so casting away the 11590 // const isn't dangerous. 11591 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11592 11593 if (ClassifyExpressions) { 11594 OS << "Classifying expressions for: "; 11595 F.printAsOperand(OS, /*PrintType=*/false); 11596 OS << "\n"; 11597 for (Instruction &I : instructions(F)) 11598 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 11599 OS << I << '\n'; 11600 OS << " --> "; 11601 const SCEV *SV = SE.getSCEV(&I); 11602 SV->print(OS); 11603 if (!isa<SCEVCouldNotCompute>(SV)) { 11604 OS << " U: "; 11605 SE.getUnsignedRange(SV).print(OS); 11606 OS << " S: "; 11607 SE.getSignedRange(SV).print(OS); 11608 } 11609 11610 const Loop *L = LI.getLoopFor(I.getParent()); 11611 11612 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 11613 if (AtUse != SV) { 11614 OS << " --> "; 11615 AtUse->print(OS); 11616 if (!isa<SCEVCouldNotCompute>(AtUse)) { 11617 OS << " U: "; 11618 SE.getUnsignedRange(AtUse).print(OS); 11619 OS << " S: "; 11620 SE.getSignedRange(AtUse).print(OS); 11621 } 11622 } 11623 11624 if (L) { 11625 OS << "\t\t" "Exits: "; 11626 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 11627 if (!SE.isLoopInvariant(ExitValue, L)) { 11628 OS << "<<Unknown>>"; 11629 } else { 11630 OS << *ExitValue; 11631 } 11632 11633 bool First = true; 11634 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 11635 if (First) { 11636 OS << "\t\t" "LoopDispositions: { "; 11637 First = false; 11638 } else { 11639 OS << ", "; 11640 } 11641 11642 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11643 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 11644 } 11645 11646 for (auto *InnerL : depth_first(L)) { 11647 if (InnerL == L) 11648 continue; 11649 if (First) { 11650 OS << "\t\t" "LoopDispositions: { "; 11651 First = false; 11652 } else { 11653 OS << ", "; 11654 } 11655 11656 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 11657 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 11658 } 11659 11660 OS << " }"; 11661 } 11662 11663 OS << "\n"; 11664 } 11665 } 11666 11667 OS << "Determining loop execution counts for: "; 11668 F.printAsOperand(OS, /*PrintType=*/false); 11669 OS << "\n"; 11670 for (Loop *I : LI) 11671 PrintLoopInfo(OS, &SE, I); 11672 } 11673 11674 ScalarEvolution::LoopDisposition 11675 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 11676 auto &Values = LoopDispositions[S]; 11677 for (auto &V : Values) { 11678 if (V.getPointer() == L) 11679 return V.getInt(); 11680 } 11681 Values.emplace_back(L, LoopVariant); 11682 LoopDisposition D = computeLoopDisposition(S, L); 11683 auto &Values2 = LoopDispositions[S]; 11684 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11685 if (V.getPointer() == L) { 11686 V.setInt(D); 11687 break; 11688 } 11689 } 11690 return D; 11691 } 11692 11693 ScalarEvolution::LoopDisposition 11694 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 11695 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11696 case scConstant: 11697 return LoopInvariant; 11698 case scTruncate: 11699 case scZeroExtend: 11700 case scSignExtend: 11701 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 11702 case scAddRecExpr: { 11703 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11704 11705 // If L is the addrec's loop, it's computable. 
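// For example, {0,+,1}<%L> is LoopComputable with respect to %L itself,
// LoopInvariant with respect to any loop nested inside %L, and LoopVariant
// with respect to any loop that contains %L.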
11706 if (AR->getLoop() == L) 11707 return LoopComputable; 11708 11709 // Add recurrences are never invariant in the function-body (null loop). 11710 if (!L) 11711 return LoopVariant; 11712 11713 // Everything that is not defined at loop entry is variant. 11714 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 11715 return LoopVariant; 11716 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 11717 " dominate the contained loop's header?"); 11718 11719 // This recurrence is invariant w.r.t. L if AR's loop contains L. 11720 if (AR->getLoop()->contains(L)) 11721 return LoopInvariant; 11722 11723 // This recurrence is variant w.r.t. L if any of its operands 11724 // are variant. 11725 for (auto *Op : AR->operands()) 11726 if (!isLoopInvariant(Op, L)) 11727 return LoopVariant; 11728 11729 // Otherwise it's loop-invariant. 11730 return LoopInvariant; 11731 } 11732 case scAddExpr: 11733 case scMulExpr: 11734 case scUMaxExpr: 11735 case scSMaxExpr: 11736 case scUMinExpr: 11737 case scSMinExpr: { 11738 bool HasVarying = false; 11739 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 11740 LoopDisposition D = getLoopDisposition(Op, L); 11741 if (D == LoopVariant) 11742 return LoopVariant; 11743 if (D == LoopComputable) 11744 HasVarying = true; 11745 } 11746 return HasVarying ? LoopComputable : LoopInvariant; 11747 } 11748 case scUDivExpr: { 11749 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11750 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 11751 if (LD == LoopVariant) 11752 return LoopVariant; 11753 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 11754 if (RD == LoopVariant) 11755 return LoopVariant; 11756 return (LD == LoopInvariant && RD == LoopInvariant) ? 11757 LoopInvariant : LoopComputable; 11758 } 11759 case scUnknown: 11760 // All non-instruction values are loop invariant. All instructions are loop 11761 // invariant if they are not contained in the specified loop. 11762 // Instructions are never considered invariant in the function body 11763 // (null loop) because they are defined within the "loop". 11764 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 11765 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 11766 return LoopInvariant; 11767 case scCouldNotCompute: 11768 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11769 } 11770 llvm_unreachable("Unknown SCEV kind!"); 11771 } 11772 11773 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 11774 return getLoopDisposition(S, L) == LoopInvariant; 11775 } 11776 11777 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 11778 return getLoopDisposition(S, L) == LoopComputable; 11779 } 11780 11781 ScalarEvolution::BlockDisposition 11782 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11783 auto &Values = BlockDispositions[S]; 11784 for (auto &V : Values) { 11785 if (V.getPointer() == BB) 11786 return V.getInt(); 11787 } 11788 Values.emplace_back(BB, DoesNotDominateBlock); 11789 BlockDisposition D = computeBlockDisposition(S, BB); 11790 auto &Values2 = BlockDispositions[S]; 11791 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 11792 if (V.getPointer() == BB) { 11793 V.setInt(D); 11794 break; 11795 } 11796 } 11797 return D; 11798 } 11799 11800 ScalarEvolution::BlockDisposition 11801 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 11802 switch (static_cast<SCEVTypes>(S->getSCEVType())) { 11803 case scConstant: 11804 return ProperlyDominatesBlock; 11805 case scTruncate: 11806 case scZeroExtend: 11807 case scSignExtend: 11808 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 11809 case scAddRecExpr: { 11810 // This uses a "dominates" query instead of "properly dominates" query 11811 // to test for proper dominance too, because the instruction which 11812 // produces the addrec's value is a PHI, and a PHI effectively properly 11813 // dominates its entire containing block. 11814 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 11815 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 11816 return DoesNotDominateBlock; 11817 11818 // Fall through into SCEVNAryExpr handling. 11819 LLVM_FALLTHROUGH; 11820 } 11821 case scAddExpr: 11822 case scMulExpr: 11823 case scUMaxExpr: 11824 case scSMaxExpr: 11825 case scUMinExpr: 11826 case scSMinExpr: { 11827 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 11828 bool Proper = true; 11829 for (const SCEV *NAryOp : NAry->operands()) { 11830 BlockDisposition D = getBlockDisposition(NAryOp, BB); 11831 if (D == DoesNotDominateBlock) 11832 return DoesNotDominateBlock; 11833 if (D == DominatesBlock) 11834 Proper = false; 11835 } 11836 return Proper ? ProperlyDominatesBlock : DominatesBlock; 11837 } 11838 case scUDivExpr: { 11839 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 11840 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 11841 BlockDisposition LD = getBlockDisposition(LHS, BB); 11842 if (LD == DoesNotDominateBlock) 11843 return DoesNotDominateBlock; 11844 BlockDisposition RD = getBlockDisposition(RHS, BB); 11845 if (RD == DoesNotDominateBlock) 11846 return DoesNotDominateBlock; 11847 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
11848 ProperlyDominatesBlock : DominatesBlock; 11849 } 11850 case scUnknown: 11851 if (Instruction *I = 11852 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 11853 if (I->getParent() == BB) 11854 return DominatesBlock; 11855 if (DT.properlyDominates(I->getParent(), BB)) 11856 return ProperlyDominatesBlock; 11857 return DoesNotDominateBlock; 11858 } 11859 return ProperlyDominatesBlock; 11860 case scCouldNotCompute: 11861 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 11862 } 11863 llvm_unreachable("Unknown SCEV kind!"); 11864 } 11865 11866 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 11867 return getBlockDisposition(S, BB) >= DominatesBlock; 11868 } 11869 11870 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 11871 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 11872 } 11873 11874 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 11875 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 11876 } 11877 11878 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 11879 auto IsS = [&](const SCEV *X) { return S == X; }; 11880 auto ContainsS = [&](const SCEV *X) { 11881 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 11882 }; 11883 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 11884 } 11885 11886 void 11887 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 11888 ValuesAtScopes.erase(S); 11889 LoopDispositions.erase(S); 11890 BlockDispositions.erase(S); 11891 UnsignedRanges.erase(S); 11892 SignedRanges.erase(S); 11893 ExprValueMap.erase(S); 11894 HasRecMap.erase(S); 11895 MinTrailingZerosCache.erase(S); 11896 11897 for (auto I = PredicatedSCEVRewrites.begin(); 11898 I != PredicatedSCEVRewrites.end();) { 11899 std::pair<const SCEV *, const Loop *> Entry = I->first; 11900 if (Entry.first == S) 11901 PredicatedSCEVRewrites.erase(I++); 11902 else 11903 ++I; 11904 } 11905 11906 auto RemoveSCEVFromBackedgeMap = 11907 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 11908 for (auto I = Map.begin(), E = Map.end(); I != E;) { 11909 BackedgeTakenInfo &BEInfo = I->second; 11910 if (BEInfo.hasOperand(S, this)) { 11911 BEInfo.clear(); 11912 Map.erase(I++); 11913 } else 11914 ++I; 11915 } 11916 }; 11917 11918 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 11919 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 11920 } 11921 11922 void 11923 ScalarEvolution::getUsedLoops(const SCEV *S, 11924 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 11925 struct FindUsedLoops { 11926 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 11927 : LoopsUsed(LoopsUsed) {} 11928 SmallPtrSetImpl<const Loop *> &LoopsUsed; 11929 bool follow(const SCEV *S) { 11930 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 11931 LoopsUsed.insert(AR->getLoop()); 11932 return true; 11933 } 11934 11935 bool isDone() const { return false; } 11936 }; 11937 11938 FindUsedLoops F(LoopsUsed); 11939 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 11940 } 11941 11942 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 11943 SmallPtrSet<const Loop *, 8> LoopsUsed; 11944 getUsedLoops(S, LoopsUsed); 11945 for (auto *L : LoopsUsed) 11946 LoopUsers[L].push_back(S); 11947 } 11948 11949 void ScalarEvolution::verify() const { 11950 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 11951 ScalarEvolution SE2(F, TLI, AC, DT, LI); 11952 11953 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 11954 11955 // Maps SCEV expressions from one
ScalarEvolution "universe" to another. 11956 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 11957 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 11958 11959 const SCEV *visitConstant(const SCEVConstant *Constant) { 11960 return SE.getConstant(Constant->getAPInt()); 11961 } 11962 11963 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 11964 return SE.getUnknown(Expr->getValue()); 11965 } 11966 11967 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 11968 return SE.getCouldNotCompute(); 11969 } 11970 }; 11971 11972 SCEVMapper SCM(SE2); 11973 11974 while (!LoopStack.empty()) { 11975 auto *L = LoopStack.pop_back_val(); 11976 LoopStack.insert(LoopStack.end(), L->begin(), L->end()); 11977 11978 auto *CurBECount = SCM.visit( 11979 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 11980 auto *NewBECount = SE2.getBackedgeTakenCount(L); 11981 11982 if (CurBECount == SE2.getCouldNotCompute() || 11983 NewBECount == SE2.getCouldNotCompute()) { 11984 // NB! This situation is legal, but is very suspicious -- whatever pass 11985 // change the loop to make a trip count go from could not compute to 11986 // computable or vice-versa *should have* invalidated SCEV. However, we 11987 // choose not to assert here (for now) since we don't want false 11988 // positives. 11989 continue; 11990 } 11991 11992 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 11993 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 11994 // not propagate undef aggressively). This means we can (and do) fail 11995 // verification in cases where a transform makes the trip count of a loop 11996 // go from "undef" to "undef+1" (say). The transform is fine, since in 11997 // both cases the loop iterates "undef" times, but SCEV thinks we 11998 // increased the trip count of the loop by 1 incorrectly. 11999 continue; 12000 } 12001 12002 if (SE.getTypeSizeInBits(CurBECount->getType()) > 12003 SE.getTypeSizeInBits(NewBECount->getType())) 12004 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 12005 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 12006 SE.getTypeSizeInBits(NewBECount->getType())) 12007 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 12008 12009 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 12010 12011 // Unless VerifySCEVStrict is set, we only compare constant deltas. 12012 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 12013 dbgs() << "Trip Count for " << *L << " Changed!\n"; 12014 dbgs() << "Old: " << *CurBECount << "\n"; 12015 dbgs() << "New: " << *NewBECount << "\n"; 12016 dbgs() << "Delta: " << *Delta << "\n"; 12017 std::abort(); 12018 } 12019 } 12020 } 12021 12022 bool ScalarEvolution::invalidate( 12023 Function &F, const PreservedAnalyses &PA, 12024 FunctionAnalysisManager::Invalidator &Inv) { 12025 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12026 // of its dependencies is invalidated. 
12027 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12028 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12029 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12030 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12031 Inv.invalidate<LoopAnalysis>(F, PA); 12032 } 12033 12034 AnalysisKey ScalarEvolutionAnalysis::Key; 12035 12036 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12037 FunctionAnalysisManager &AM) { 12038 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12039 AM.getResult<AssumptionAnalysis>(F), 12040 AM.getResult<DominatorTreeAnalysis>(F), 12041 AM.getResult<LoopAnalysis>(F)); 12042 } 12043 12044 PreservedAnalyses 12045 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12046 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12047 return PreservedAnalyses::all(); 12048 } 12049 12050 PreservedAnalyses 12051 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12052 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12053 return PreservedAnalyses::all(); 12054 } 12055 12056 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12057 "Scalar Evolution Analysis", false, true) 12058 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12059 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12060 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12061 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12062 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12063 "Scalar Evolution Analysis", false, true) 12064 12065 char ScalarEvolutionWrapperPass::ID = 0; 12066 12067 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12068 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12069 } 12070 12071 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12072 SE.reset(new ScalarEvolution( 12073 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12074 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12075 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12076 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12077 return false; 12078 } 12079 12080 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12081 12082 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12083 SE->print(OS); 12084 } 12085 12086 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12087 if (!VerifySCEV) 12088 return; 12089 12090 SE->verify(); 12091 } 12092 12093 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12094 AU.setPreservesAll(); 12095 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12096 AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12097 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12098 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12099 } 12100 12101 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12102 const SCEV *RHS) { 12103 FoldingSetNodeID ID; 12104 assert(LHS->getType() == RHS->getType() && 12105 "Type mismatch between LHS and RHS"); 12106 // Unique this node based on the arguments 12107 ID.AddInteger(SCEVPredicate::P_Equal); 12108 ID.AddPointer(LHS); 12109 ID.AddPointer(RHS); 12110 void *IP = nullptr; 12111 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12112 return S; 12113 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12114 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12115 UniquePreds.InsertNode(Eq, IP); 12116 return 
const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }
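  // Worked example (a sketch, not part of the class): given the affine
  // AddRec {0,+,1}<%loop> of type i32, the expression
  // (zext i32 {0,+,1}<%loop> to i64) cannot be folded without the nuw flag.
  // Under an assumed <nusw> wrap predicate on the AddRec, the visitor above
  // rewrites it to the i64 recurrence
  //
  //   {(zext i32 0 to i64),+,(sext i32 1 to i64)}<%loop>
  //
  // i.e. {0,+,1}<%loop> of type i64, which is only valid if the recorded
  // predicate is later enforced with a runtime check.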
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
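// Usage sketch (illustrative only; SE, L, A and UP are assumed to be in
// scope): if a SCEVUnionPredicate UP records the equality of %a with some
// other SCEV (via getEqualPredicate), rewriting an expression that mentions
// %a substitutes the equal expression for it, by the visitUnknown case above:
//
//   const SCEV *S = SE.getSCEV(A);  // the SCEVUnknown for %a
//   const SCEV *R = SE.rewriteUsingPredicate(S, L, UP);
//   // R is now the RHS of the recorded equal predicate, not %a itself.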
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply
    // the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
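// Worked example (a sketch): for the AddRec {0,+,1}<nuw><nsw><%loop>,
// getImpliedFlags first transfers NSW as IncrementNSSW; since NUW also holds
// and the constant step 1 is non-negative, IncrementNUSW is set as well.
// setNoOverflow (further below) relies on this to clear statically implied
// flags before recording a wrap predicate, so the predicate it adds for such
// an AddRec carries no remaining flags and is trivially true at runtime.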
/// Union predicates don't get cached, so create a dummy set ID for them.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
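// Typical client pattern (an illustrative sketch; SE, L and V are assumed):
// a transformation queries the predicated SCEV and, if it commits to the
// result, must emit runtime checks for every predicate collected along the
// way:
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   const SCEV *S = PSE.getSCEV(V);   // rewritten under PSE's predicates
//   const SCEVUnionPredicate &UP = PSE.getUnionPredicate();
//   if (!UP.isAlwaysTrue()) {
//     // ... materialize UP as runtime checks before relying on S ...
//   }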
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each instruction in the loop's blocks, print the rewritten SCEV if
  // it differs from the original one.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
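// Illustrative sketch (PSE and Ptr are assumed names): getAsAddRec above is
// the predicated analogue of dyn_cast<SCEVAddRecExpr>(SE.getSCEV(V)). For an
// induction whose plain SCEV stays opaque without predicates, a client may
// still obtain a recurrence at the cost of extra runtime checks:
//
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is valid only under PSE.getUnionPredicate(); the predicates
//     // gathered by convertSCEVToAddRecWithPredicates must be checked at
//     // runtime before relying on AR.
//   }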
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
// It's not always easy, as A and B can be folded (imagine A is X / 2 and B
// is 4; then A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
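// Worked example (a sketch; the exact canonical shape is an assumption about
// SCEV's folding): for i64 values %a and %b, the IR expression
// %a - (%a /u %b) * %b is typically built by SCEV as the add
//
//   ((-1 * (%a /u %b) * %b) + %a)
//
// i.e. a two-operand add whose first operand is the three-operand mul handled
// by the first case above. matchURem then confirms the guess by rebuilding
// (%a urem %b) via getURemExpr and comparing uniqued pointers with Expr,
// yielding LHS = %a and RHS = %b on success.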