//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
"symbolically execute a constant " 154 "derived loop"), 155 cl::init(100)); 156 157 // FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean. 158 static cl::opt<bool> VerifySCEV( 159 "verify-scev", cl::Hidden, 160 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 161 static cl::opt<bool> VerifySCEVStrict( 162 "verify-scev-strict", cl::Hidden, 163 cl::desc("Enable stricter verification with -verify-scev is passed")); 164 static cl::opt<bool> 165 VerifySCEVMap("verify-scev-maps", cl::Hidden, 166 cl::desc("Verify no dangling value in ScalarEvolution's " 167 "ExprValueMap (slow)")); 168 169 static cl::opt<bool> VerifyIR( 170 "scev-verify-ir", cl::Hidden, 171 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 172 cl::init(false)); 173 174 static cl::opt<unsigned> MulOpsInlineThreshold( 175 "scev-mulops-inline-threshold", cl::Hidden, 176 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 177 cl::init(32)); 178 179 static cl::opt<unsigned> AddOpsInlineThreshold( 180 "scev-addops-inline-threshold", cl::Hidden, 181 cl::desc("Threshold for inlining addition operands into a SCEV"), 182 cl::init(500)); 183 184 static cl::opt<unsigned> MaxSCEVCompareDepth( 185 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 186 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 187 cl::init(32)); 188 189 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 190 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 191 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 192 cl::init(2)); 193 194 static cl::opt<unsigned> MaxValueCompareDepth( 195 "scalar-evolution-max-value-compare-depth", cl::Hidden, 196 cl::desc("Maximum depth of recursive value complexity comparisons"), 197 cl::init(2)); 198 199 static cl::opt<unsigned> 200 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 201 cl::desc("Maximum depth of recursive arithmetics"), 202 cl::init(32)); 203 204 static cl::opt<unsigned> MaxConstantEvolvingDepth( 205 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 206 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 207 208 static cl::opt<unsigned> 209 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 210 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 211 cl::init(8)); 212 213 static cl::opt<unsigned> 214 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 215 cl::desc("Max coefficients in AddRec during evolving"), 216 cl::init(8)); 217 218 static cl::opt<unsigned> 219 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 220 cl::desc("Size of the expression which is considered huge"), 221 cl::init(4096)); 222 223 static cl::opt<bool> 224 ClassifyExpressions("scalar-evolution-classify-expressions", 225 cl::Hidden, cl::init(true), 226 cl::desc("When printing analysis, include information on every instruction")); 227 228 static cl::opt<bool> UseExpensiveRangeSharpening( 229 "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, 230 cl::init(false), 231 cl::desc("Use more powerful methods of sharpening expression ranges. 
May " 232 "be costly in terms of compile time")); 233 234 //===----------------------------------------------------------------------===// 235 // SCEV class definitions 236 //===----------------------------------------------------------------------===// 237 238 //===----------------------------------------------------------------------===// 239 // Implementation of the SCEV class. 240 // 241 242 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 243 LLVM_DUMP_METHOD void SCEV::dump() const { 244 print(dbgs()); 245 dbgs() << '\n'; 246 } 247 #endif 248 249 void SCEV::print(raw_ostream &OS) const { 250 switch (getSCEVType()) { 251 case scConstant: 252 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 253 return; 254 case scPtrToInt: { 255 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); 256 const SCEV *Op = PtrToInt->getOperand(); 257 OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " 258 << *PtrToInt->getType() << ")"; 259 return; 260 } 261 case scTruncate: { 262 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 263 const SCEV *Op = Trunc->getOperand(); 264 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 265 << *Trunc->getType() << ")"; 266 return; 267 } 268 case scZeroExtend: { 269 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 270 const SCEV *Op = ZExt->getOperand(); 271 OS << "(zext " << *Op->getType() << " " << *Op << " to " 272 << *ZExt->getType() << ")"; 273 return; 274 } 275 case scSignExtend: { 276 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 277 const SCEV *Op = SExt->getOperand(); 278 OS << "(sext " << *Op->getType() << " " << *Op << " to " 279 << *SExt->getType() << ")"; 280 return; 281 } 282 case scAddRecExpr: { 283 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 284 OS << "{" << *AR->getOperand(0); 285 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 286 OS << ",+," << *AR->getOperand(i); 287 OS << "}<"; 288 if (AR->hasNoUnsignedWrap()) 289 OS << "nuw><"; 290 if (AR->hasNoSignedWrap()) 291 OS << "nsw><"; 292 if (AR->hasNoSelfWrap() && 293 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 294 OS << "nw><"; 295 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 296 OS << ">"; 297 return; 298 } 299 case scAddExpr: 300 case scMulExpr: 301 case scUMaxExpr: 302 case scSMaxExpr: 303 case scUMinExpr: 304 case scSMinExpr: { 305 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 306 const char *OpStr = nullptr; 307 switch (NAry->getSCEVType()) { 308 case scAddExpr: OpStr = " + "; break; 309 case scMulExpr: OpStr = " * "; break; 310 case scUMaxExpr: OpStr = " umax "; break; 311 case scSMaxExpr: OpStr = " smax "; break; 312 case scUMinExpr: 313 OpStr = " umin "; 314 break; 315 case scSMinExpr: 316 OpStr = " smin "; 317 break; 318 default: 319 llvm_unreachable("There are no other nary expression types."); 320 } 321 OS << "("; 322 ListSeparator LS(OpStr); 323 for (const SCEV *Op : NAry->operands()) 324 OS << LS << *Op; 325 OS << ")"; 326 switch (NAry->getSCEVType()) { 327 case scAddExpr: 328 case scMulExpr: 329 if (NAry->hasNoUnsignedWrap()) 330 OS << "<nuw>"; 331 if (NAry->hasNoSignedWrap()) 332 OS << "<nsw>"; 333 break; 334 default: 335 // Nothing to print for other nary expressions. 
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return None, assuming we do not know
// if they are equivalent for sure.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because
  // we do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
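  // BC(It, 1) is simply It, so all that is needed is a cast to the result
  // width.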
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
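  // For example, with K = 5: K! = 120 = 2^3 * 15, so T = 3 and the odd
  // factorial computed below is 15.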
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.
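  // For example, the pointer-typed add (4 + %ptr) becomes the integer-typed
  // add (4 + (ptrtoint %ptr)), with the cast applied only to the SCEVUnknown.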

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
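  // For example, (trunc i64 ((zext i32 %a to i64) + (sext i32 %b to i64))
  // to i32) becomes (%a + %b): both truncates cancel the inner casts, so no
  // new truncate survives the transformation.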
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that, during recursion and other modifications, an entry for ID
    // was inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
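// For example, for an i8 recurrence whose Step has unsigned range [1, 3], the
// limit is 0 - 3 = 253: any recurrence value below 253 can be incremented by
// the step without wrapping around.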
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
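  // For instance, if Start is the add expression (20 + %x + Step), the
  // candidate PreStart computed below is (20 + %x).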
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
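// For example, with BitWidth 8, C = 0xA7, and four trailing zeros in
// (x + y + ...), D is 0x07: the remainder C - D = 0xA0 preserves the trailing
// zeros, and adding D < 2^4 back cannot carry past bit 3, so the top-level
// addition cannot wrap.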
C.trunc(TZ).zext(BitWidth) : C; 1555 } 1556 return APInt(BitWidth, 0); 1557 } 1558 1559 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1560 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1561 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1562 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1563 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1564 const APInt &ConstantStart, 1565 const SCEV *Step) { 1566 const unsigned BitWidth = ConstantStart.getBitWidth(); 1567 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1568 if (TZ) 1569 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1570 : ConstantStart; 1571 return APInt(BitWidth, 0); 1572 } 1573 1574 const SCEV * 1575 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1576 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1577 "This is not an extending conversion!"); 1578 assert(isSCEVable(Ty) && 1579 "This is not a conversion to a SCEVable type!"); 1580 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1581 Ty = getEffectiveSCEVType(Ty); 1582 1583 // Fold if the operand is constant. 1584 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1585 return getConstant( 1586 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1587 1588 // zext(zext(x)) --> zext(x) 1589 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1590 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1591 1592 // Before doing any expensive analysis, check to see if we've already 1593 // computed a SCEV for this Op and Ty. 1594 FoldingSetNodeID ID; 1595 ID.AddInteger(scZeroExtend); 1596 ID.AddPointer(Op); 1597 ID.AddPointer(Ty); 1598 void *IP = nullptr; 1599 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1600 if (Depth > MaxCastDepth) { 1601 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1602 Op, Ty); 1603 UniqueSCEVs.InsertNode(S, IP); 1604 registerUser(S, Op); 1605 return S; 1606 } 1607 1608 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1609 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1610 // It's possible the bits taken off by the truncate were all zero bits. If 1611 // so, we should be able to simplify this further. 1612 const SCEV *X = ST->getOperand(); 1613 ConstantRange CR = getUnsignedRange(X); 1614 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1615 unsigned NewBits = getTypeSizeInBits(Ty); 1616 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1617 CR.zextOrTrunc(NewBits))) 1618 return getTruncateOrZeroExtend(X, Ty, Depth); 1619 } 1620 1621 // If the input value is a chrec scev, and we can prove that the value 1622 // did not overflow the old, smaller, value, we can zero extend all of the 1623 // operands (often constants). 
This allows analysis of something like
      // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
        if (AR->isAffine()) {
          const SCEV *Start = AR->getStart();
          const SCEV *Step = AR->getStepRecurrence(*this);
          unsigned BitWidth = getTypeSizeInBits(AR->getType());
          const Loop *L = AR->getLoop();

          if (!AR->hasNoUnsignedWrap()) {
            auto NewFlags = proveNoWrapViaConstantRanges(AR);
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
          }

          // If we have special knowledge that this addrec won't overflow,
          // we don't need to do any further analysis.
          if (AR->hasNoUnsignedWrap())
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

          // Check whether the backedge-taken count is SCEVCouldNotCompute.
          // Note that this serves two purposes: It filters out loops that are
          // simply not analyzable, and it covers the case where this code is
          // being called from within backedge-taken count analysis, such that
          // attempting to ask for the backedge-taken count would likely result
          // in infinite recursion. In the latter case, the analysis code will
          // cope with a conservative value, and it will take care to purge
          // that value once it has finished.
          const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
          if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
            // Manually compute the final value for AR, checking for overflow.

            // Check whether the backedge-taken count can be losslessly cast to
            // the addrec's type. The count is always unsigned.
            const SCEV *CastedMaxBECount =
                getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
            const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
                CastedMaxBECount, MaxBECount->getType(), Depth);
            if (MaxBECount == RecastedMaxBECount) {
              Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
              // Check whether Start+Step*MaxBECount has no unsigned overflow.
              const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                            SCEV::FlagAnyWrap, Depth + 1);
              const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                              SCEV::FlagAnyWrap,
                                                              Depth + 1),
                                                   WideTy, Depth + 1);
              const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
              const SCEV *WideMaxBECount =
                  getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
              const SCEV *OperandExtendedAdd =
                  getAddExpr(WideStart,
                             getMulExpr(WideMaxBECount,
                                        getZeroExtendExpr(Step, WideTy, Depth + 1),
                                        SCEV::FlagAnyWrap, Depth + 1),
                             SCEV::FlagAnyWrap, Depth + 1);
              if (ZAdd == OperandExtendedAdd) {
                // Cache knowledge of AR NUW, which is propagated to this AddRec.
                setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
                // Return the expression with the addrec on the outside.
                return getAddRecExpr(
                    getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1),
                    getZeroExtendExpr(Step, Ty, Depth + 1), L,
                    AR->getNoWrapFlags());
              }
              // Similar to above, only this time treat the step value as signed.
              // This covers loops that count down.
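              // For example (an illustrative sketch, not part of the original
              // comments): for AR = {5,+,-1} and a max backedge-taken count of
              // 5, ZAdd is zext(5 + 5 * -1) == zext(0) == 0, and
              // OperandExtendedAdd is 5 + 5 * sext(-1) == 0 in the wide type,
              // so the equality below holds and the recurrence is marked <nw>
              // even though its step is a huge unsigned value.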
              OperandExtendedAdd =
                  getAddExpr(WideStart,
                             getMulExpr(WideMaxBECount,
                                        getSignExtendExpr(Step, WideTy, Depth + 1),
                                        SCEV::FlagAnyWrap, Depth + 1),
                             SCEV::FlagAnyWrap, Depth + 1);
              if (ZAdd == OperandExtendedAdd) {
                // Cache knowledge of AR NW, which is propagated to this AddRec.
                // Negative step causes unsigned wrap, but it still can't self-wrap.
                setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
                // Return the expression with the addrec on the outside.
                return getAddRecExpr(
                    getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1),
                    getSignExtendExpr(Step, Ty, Depth + 1), L,
                    AR->getNoWrapFlags());
              }
            }
          }

          // Normally, in the cases we can prove no-overflow via a
          // backedge guarding condition, we can also compute a backedge
          // taken count for the loop. The exceptions are assumptions and
          // guards present in the loop -- SCEV is not great at exploiting
          // these to compute max backedge taken counts, but can still use
          // these to prove lack of overflow. Use this fact to avoid
          // doing extra work that may not pay off.
          if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
              !AC.assumptions().empty()) {

            auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
            if (AR->hasNoUnsignedWrap()) {
              // Same as the nuw case above - duplicated here to avoid a
              // compile time issue. It's not clear that the order of checks
              // matters, but it's one of two possible causes for a change
              // which was reverted. Be conservative for the moment.
              return getAddRecExpr(
                  getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                           Depth + 1),
                  getZeroExtendExpr(Step, Ty, Depth + 1), L,
                  AR->getNoWrapFlags());
            }

            // For a negative step, we can extend the operands iff doing so only
            // traverses values in the range zext([0,UINT_MAX]).
            if (isKnownNegative(Step)) {
              const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                          getSignedRangeMin(Step));
              if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
                  isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
                // Cache knowledge of AR NW, which is propagated to this
                // AddRec.  Negative step causes unsigned wrap, but it
                // still can't self-wrap.
                setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
                // Return the expression with the addrec on the outside.
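                // (Illustrative note, not from the original comments: with
                // Step == -1, getSignedRangeMin(Step) is -1, so N is all-ones
                // minus all-ones == 0 and the guard above reduces to checking
                // "AR u> 0" on the backedge -- the usual exit test of a loop
                // counting down to zero.)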
                return getAddRecExpr(
                    getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                             Depth + 1),
                    getSignExtendExpr(Step, Ty, Depth + 1), L,
                    AR->getNoWrapFlags());
              }
            }
          }

          // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
          // if D + (C - D + Step * n) could be proven to not unsigned wrap
          // where D maximizes the number of trailing zeros of (C - D + Step * n)
          if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
            const APInt &C = SC->getAPInt();
            const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
            if (D != 0) {
              const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
              const SCEV *SResidual =
                  getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
              const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
              return getAddExpr(SZExtD, SZExtR,
                                (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                                Depth + 1);
            }
          }

          if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
          }
        }

      // zext(A % B) --> zext(A) % zext(B)
      {
        const SCEV *LHS;
        const SCEV *RHS;
        if (matchURem(Op, LHS, RHS))
          return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                             getZeroExtendExpr(RHS, Ty, Depth + 1));
      }

      // zext(A / B) --> zext(A) / zext(B).
      if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
        return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                           getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

      if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
        // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
        if (SA->hasNoUnsignedWrap()) {
          // If the addition does not unsign overflow then we can, by definition,
          // commute the zero extension with the addition operation.
          SmallVector<const SCEV *, 4> Ops;
          for (const auto *Op : SA->operands())
            Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
          return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
        }

        // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
        // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
        // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
        //
        // Address arithmetic often contains expressions like
        // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
        // This transformation is useful while proving that such expressions are
        // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
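        // As a worked instance (illustrative, not from the original comments):
        // in zext(5 + 4*X), the non-constant part 4*X keeps at least two
        // trailing zero bits, so D == 1 and the fold below produces
        // (zext(1) + zext(4 + 4*X)); adding 1 into two known-zero low bits
        // cannot carry, hence the <nuw><nsw> on the outer add.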
1815 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1816 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1817 if (D != 0) { 1818 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1819 const SCEV *SResidual = 1820 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1821 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1822 return getAddExpr(SZExtD, SZExtR, 1823 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1824 Depth + 1); 1825 } 1826 } 1827 } 1828 1829 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1830 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1831 if (SM->hasNoUnsignedWrap()) { 1832 // If the multiply does not unsign overflow then we can, by definition, 1833 // commute the zero extension with the multiply operation. 1834 SmallVector<const SCEV *, 4> Ops; 1835 for (const auto *Op : SM->operands()) 1836 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1837 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1838 } 1839 1840 // zext(2^K * (trunc X to iN)) to iM -> 1841 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1842 // 1843 // Proof: 1844 // 1845 // zext(2^K * (trunc X to iN)) to iM 1846 // = zext((trunc X to iN) << K) to iM 1847 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1848 // (because shl removes the top K bits) 1849 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1850 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1851 // 1852 if (SM->getNumOperands() == 2) 1853 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1854 if (MulLHS->getAPInt().isPowerOf2()) 1855 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1856 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1857 MulLHS->getAPInt().logBase2(); 1858 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1859 return getMulExpr( 1860 getZeroExtendExpr(MulLHS, Ty), 1861 getZeroExtendExpr( 1862 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1863 SCEV::FlagNUW, Depth + 1); 1864 } 1865 } 1866 1867 // The cast wasn't folded; create an explicit cast node. 1868 // Recompute the insert position, as it may have been invalidated. 1869 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1870 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1871 Op, Ty); 1872 UniqueSCEVs.InsertNode(S, IP); 1873 registerUser(S, Op); 1874 return S; 1875 } 1876 1877 const SCEV * 1878 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1879 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1880 "This is not an extending conversion!"); 1881 assert(isSCEVable(Ty) && 1882 "This is not a conversion to a SCEVable type!"); 1883 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1884 Ty = getEffectiveSCEVType(Ty); 1885 1886 // Fold if the operand is constant. 1887 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1888 return getConstant( 1889 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1890 1891 // sext(sext(x)) --> sext(x) 1892 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1893 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1894 1895 // sext(zext(x)) --> zext(x) 1896 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1897 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1898 1899 // Before doing any expensive analysis, check to see if we've already 1900 // computed a SCEV for this Op and Ty. 
1901 FoldingSetNodeID ID; 1902 ID.AddInteger(scSignExtend); 1903 ID.AddPointer(Op); 1904 ID.AddPointer(Ty); 1905 void *IP = nullptr; 1906 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1907 // Limit recursion depth. 1908 if (Depth > MaxCastDepth) { 1909 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1910 Op, Ty); 1911 UniqueSCEVs.InsertNode(S, IP); 1912 registerUser(S, Op); 1913 return S; 1914 } 1915 1916 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1917 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1918 // It's possible the bits taken off by the truncate were all sign bits. If 1919 // so, we should be able to simplify this further. 1920 const SCEV *X = ST->getOperand(); 1921 ConstantRange CR = getSignedRange(X); 1922 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1923 unsigned NewBits = getTypeSizeInBits(Ty); 1924 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1925 CR.sextOrTrunc(NewBits))) 1926 return getTruncateOrSignExtend(X, Ty, Depth); 1927 } 1928 1929 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1930 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1931 if (SA->hasNoSignedWrap()) { 1932 // If the addition does not sign overflow then we can, by definition, 1933 // commute the sign extension with the addition operation. 1934 SmallVector<const SCEV *, 4> Ops; 1935 for (const auto *Op : SA->operands()) 1936 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1937 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1938 } 1939 1940 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1941 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1942 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1943 // 1944 // For instance, this will bring two seemingly different expressions: 1945 // 1 + sext(5 + 20 * %x + 24 * %y) and 1946 // sext(6 + 20 * %x + 24 * %y) 1947 // to the same form: 1948 // 2 + sext(4 + 20 * %x + 24 * %y) 1949 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1950 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1951 if (D != 0) { 1952 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1953 const SCEV *SResidual = 1954 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1955 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1956 return getAddExpr(SSExtD, SSExtR, 1957 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1958 Depth + 1); 1959 } 1960 } 1961 } 1962 // If the input value is a chrec scev, and we can prove that the value 1963 // did not overflow the old, smaller, value, we can sign extend all of the 1964 // operands (often constants). This allows analysis of something like 1965 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1966 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1967 if (AR->isAffine()) { 1968 const SCEV *Start = AR->getStart(); 1969 const SCEV *Step = AR->getStepRecurrence(*this); 1970 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1971 const Loop *L = AR->getLoop(); 1972 1973 if (!AR->hasNoSignedWrap()) { 1974 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1975 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1976 } 1977 1978 // If we have special knowledge that this addrec won't overflow, 1979 // we don't need to do any further analysis. 
          if (AR->hasNoSignedWrap())
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

          // Check whether the backedge-taken count is SCEVCouldNotCompute.
          // Note that this serves two purposes: It filters out loops that are
          // simply not analyzable, and it covers the case where this code is
          // being called from within backedge-taken count analysis, such that
          // attempting to ask for the backedge-taken count would likely result
          // in infinite recursion. In the latter case, the analysis code will
          // cope with a conservative value, and it will take care to purge
          // that value once it has finished.
          const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
          if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
            // Manually compute the final value for AR, checking for
            // overflow.

            // Check whether the backedge-taken count can be losslessly cast to
            // the addrec's type. The count is always unsigned.
            const SCEV *CastedMaxBECount =
                getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
            const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
                CastedMaxBECount, MaxBECount->getType(), Depth);
            if (MaxBECount == RecastedMaxBECount) {
              Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
              // Check whether Start+Step*MaxBECount has no signed overflow.
              const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                            SCEV::FlagAnyWrap, Depth + 1);
              const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                              SCEV::FlagAnyWrap,
                                                              Depth + 1),
                                                   WideTy, Depth + 1);
              const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
              const SCEV *WideMaxBECount =
                  getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
              const SCEV *OperandExtendedAdd =
                  getAddExpr(WideStart,
                             getMulExpr(WideMaxBECount,
                                        getSignExtendExpr(Step, WideTy, Depth + 1),
                                        SCEV::FlagAnyWrap, Depth + 1),
                             SCEV::FlagAnyWrap, Depth + 1);
              if (SAdd == OperandExtendedAdd) {
                // Cache knowledge of AR NSW, which is propagated to this AddRec.
                setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
                // Return the expression with the addrec on the outside.
                return getAddRecExpr(
                    getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1),
                    getSignExtendExpr(Step, Ty, Depth + 1), L,
                    AR->getNoWrapFlags());
              }
              // Similar to above, only this time treat the step value as unsigned.
              // This covers loops that count up with an unsigned step.
              OperandExtendedAdd =
                  getAddExpr(WideStart,
                             getMulExpr(WideMaxBECount,
                                        getZeroExtendExpr(Step, WideTy, Depth + 1),
                                        SCEV::FlagAnyWrap, Depth + 1),
                             SCEV::FlagAnyWrap, Depth + 1);
              if (SAdd == OperandExtendedAdd) {
                // If AR wraps around then
                //
                //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
                //    => SAdd != OperandExtendedAdd
                //
                // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
                // (SAdd == OperandExtendedAdd => AR is NW)

                setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);

                // Return the expression with the addrec on the outside.
                return getAddRecExpr(
                    getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                             Depth + 1),
                    getZeroExtendExpr(Step, Ty, Depth + 1), L,
                    AR->getNoWrapFlags());
              }
            }
          }

          auto NewFlags = proveNoSignedWrapViaInduction(AR);
          setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
          if (AR->hasNoSignedWrap()) {
            // Same as the nsw case above - duplicated here to avoid a compile
            // time issue. It's not clear that the order of checks matters, but
            // it's one of two possible causes for a change which was reverted.
            // Be conservative for the moment.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
          }

          // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
          // if D + (C - D + Step * n) could be proven to not signed wrap
          // where D maximizes the number of trailing zeros of (C - D + Step * n)
          if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
            const APInt &C = SC->getAPInt();
            const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
            if (D != 0) {
              const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
              const SCEV *SResidual =
                  getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
              const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
              return getAddExpr(SSExtD, SSExtR,
                                (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                                Depth + 1);
            }
          }

          if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
            setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
          }
        }

      // If the input value is provably non-negative and we could not simplify
      // away the sext, build a zext instead.
      if (isKnownNonNegative(Op))
        return getZeroExtendExpr(Op, Ty, Depth + 1);

      // The cast wasn't folded; create an explicit cast node.
      // Recompute the insert position, as it may have been invalidated.
      if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
      SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                       Op, Ty);
      UniqueSCEVs.InsertNode(S, IP);
      registerUser(S, { Op });
      return S;
    }

    /// getAnyExtendExpr - Return a SCEV for the given operand extended with
    /// unspecified bits out to the given type.
    const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                                  Type *Ty) {
      assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
             "This is not an extending conversion!");
      assert(isSCEVable(Ty) &&
             "This is not a conversion to a SCEVable type!");
      Ty = getEffectiveSCEVType(Ty);

      // Sign-extend negative constants.
      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
        if (SC->getAPInt().isNegative())
          return getSignExtendExpr(Op, Ty);

      // Peel off a truncate cast.
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
        const SCEV *NewOp = T->getOperand();
        if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
          return getAnyExtendExpr(NewOp, Ty);
        return getTruncateOrNoop(NewOp, Ty);
      }

      // Next try a zext cast. If the cast is folded, use it.
2137 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2138 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2139 return ZExt; 2140 2141 // Next try a sext cast. If the cast is folded, use it. 2142 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2143 if (!isa<SCEVSignExtendExpr>(SExt)) 2144 return SExt; 2145 2146 // Force the cast to be folded into the operands of an addrec. 2147 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2148 SmallVector<const SCEV *, 4> Ops; 2149 for (const SCEV *Op : AR->operands()) 2150 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2151 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2152 } 2153 2154 // If the expression is obviously signed, use the sext cast value. 2155 if (isa<SCEVSMaxExpr>(Op)) 2156 return SExt; 2157 2158 // Absent any other information, use the zext cast value. 2159 return ZExt; 2160 } 2161 2162 /// Process the given Ops list, which is a list of operands to be added under 2163 /// the given scale, update the given map. This is a helper function for 2164 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2165 /// that would form an add expression like this: 2166 /// 2167 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2168 /// 2169 /// where A and B are constants, update the map with these values: 2170 /// 2171 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2172 /// 2173 /// and add 13 + A*B*29 to AccumulatedConstant. 2174 /// This will allow getAddRecExpr to produce this: 2175 /// 2176 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2177 /// 2178 /// This form often exposes folding opportunities that are hidden in 2179 /// the original operand list. 2180 /// 2181 /// Return true iff it appears that any interesting folding opportunities 2182 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2183 /// the common case where no interesting opportunities are present, and 2184 /// is also used as a check to avoid infinite recursion. 2185 static bool 2186 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2187 SmallVectorImpl<const SCEV *> &NewOps, 2188 APInt &AccumulatedConstant, 2189 const SCEV *const *Ops, size_t NumOperands, 2190 const APInt &Scale, 2191 ScalarEvolution &SE) { 2192 bool Interesting = false; 2193 2194 // Iterate over the add operands. They are sorted, with constants first. 2195 unsigned i = 0; 2196 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2197 ++i; 2198 // Pull a buried constant out to the outside. 2199 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2200 Interesting = true; 2201 AccumulatedConstant += Scale * C->getAPInt(); 2202 } 2203 2204 // Next comes everything else. We're especially interested in multiplies 2205 // here, but they're in the middle, so just visit the rest with one loop. 2206 for (; i != NumOperands; ++i) { 2207 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2208 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2209 APInt NewScale = 2210 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2211 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2212 // A multiplication of a constant with another add; recurse. 2213 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2214 Interesting |= 2215 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2216 Add->op_begin(), Add->getNumOperands(), 2217 NewScale, SE); 2218 } else { 2219 // A multiplication of a constant with some other value. 
Update 2220 // the map. 2221 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2222 const SCEV *Key = SE.getMulExpr(MulOps); 2223 auto Pair = M.insert({Key, NewScale}); 2224 if (Pair.second) { 2225 NewOps.push_back(Pair.first->first); 2226 } else { 2227 Pair.first->second += NewScale; 2228 // The map already had an entry for this value, which may indicate 2229 // a folding opportunity. 2230 Interesting = true; 2231 } 2232 } 2233 } else { 2234 // An ordinary operand. Update the map. 2235 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2236 M.insert({Ops[i], Scale}); 2237 if (Pair.second) { 2238 NewOps.push_back(Pair.first->first); 2239 } else { 2240 Pair.first->second += Scale; 2241 // The map already had an entry for this value, which may indicate 2242 // a folding opportunity. 2243 Interesting = true; 2244 } 2245 } 2246 } 2247 2248 return Interesting; 2249 } 2250 2251 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2252 const SCEV *LHS, const SCEV *RHS) { 2253 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2254 SCEV::NoWrapFlags, unsigned); 2255 switch (BinOp) { 2256 default: 2257 llvm_unreachable("Unsupported binary op"); 2258 case Instruction::Add: 2259 Operation = &ScalarEvolution::getAddExpr; 2260 break; 2261 case Instruction::Sub: 2262 Operation = &ScalarEvolution::getMinusSCEV; 2263 break; 2264 case Instruction::Mul: 2265 Operation = &ScalarEvolution::getMulExpr; 2266 break; 2267 } 2268 2269 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2270 Signed ? &ScalarEvolution::getSignExtendExpr 2271 : &ScalarEvolution::getZeroExtendExpr; 2272 2273 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2274 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2275 auto *WideTy = 2276 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2277 2278 const SCEV *A = (this->*Extension)( 2279 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2280 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0), 2281 (this->*Extension)(RHS, WideTy, 0), 2282 SCEV::FlagAnyWrap, 0); 2283 return A == B; 2284 } 2285 2286 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/> 2287 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2288 const OverflowingBinaryOperator *OBO) { 2289 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2290 2291 if (OBO->hasNoUnsignedWrap()) 2292 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2293 if (OBO->hasNoSignedWrap()) 2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2295 2296 bool Deduced = false; 2297 2298 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2299 return {Flags, Deduced}; 2300 2301 if (OBO->getOpcode() != Instruction::Add && 2302 OBO->getOpcode() != Instruction::Sub && 2303 OBO->getOpcode() != Instruction::Mul) 2304 return {Flags, Deduced}; 2305 2306 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2307 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2308 2309 if (!OBO->hasNoUnsignedWrap() && 2310 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2311 /* Signed */ false, LHS, RHS)) { 2312 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2313 Deduced = true; 2314 } 2315 2316 if (!OBO->hasNoSignedWrap() && 2317 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2318 /* Signed */ true, LHS, RHS)) { 2319 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2320 Deduced = true; 2321 } 2322 2323 return {Flags, Deduced}; 2324 } 2325 2326 // We're trying to construct 
a SCEV of type `Type' with `Ops' as operands and 2327 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2328 // can't-overflow flags for the operation if possible. 2329 static SCEV::NoWrapFlags 2330 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2331 const ArrayRef<const SCEV *> Ops, 2332 SCEV::NoWrapFlags Flags) { 2333 using namespace std::placeholders; 2334 2335 using OBO = OverflowingBinaryOperator; 2336 2337 bool CanAnalyze = 2338 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2339 (void)CanAnalyze; 2340 assert(CanAnalyze && "don't call from other places!"); 2341 2342 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2343 SCEV::NoWrapFlags SignOrUnsignWrap = 2344 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2345 2346 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2347 auto IsKnownNonNegative = [&](const SCEV *S) { 2348 return SE->isKnownNonNegative(S); 2349 }; 2350 2351 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2352 Flags = 2353 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2354 2355 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2356 2357 if (SignOrUnsignWrap != SignOrUnsignMask && 2358 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2359 isa<SCEVConstant>(Ops[0])) { 2360 2361 auto Opcode = [&] { 2362 switch (Type) { 2363 case scAddExpr: 2364 return Instruction::Add; 2365 case scMulExpr: 2366 return Instruction::Mul; 2367 default: 2368 llvm_unreachable("Unexpected SCEV op."); 2369 } 2370 }(); 2371 2372 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2373 2374 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2375 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2376 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2377 Opcode, C, OBO::NoSignedWrap); 2378 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2379 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2380 } 2381 2382 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2383 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2384 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2385 Opcode, C, OBO::NoUnsignedWrap); 2386 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2387 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2388 } 2389 } 2390 2391 // <0,+,nonnegative><nw> is also nuw 2392 // TODO: Add corresponding nsw case 2393 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && 2394 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && 2395 Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) 2396 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2397 2398 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW 2399 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && 2400 Ops.size() == 2) { 2401 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0])) 2402 if (UDiv->getOperand(1) == Ops[1]) 2403 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2404 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1])) 2405 if (UDiv->getOperand(1) == Ops[0]) 2406 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2407 } 2408 2409 return Flags; 2410 } 2411 2412 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2413 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2414 } 2415 2416 /// Get a canonical add expression, or something simpler if possible. 
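/// For example (an illustrative note, not part of the original comment):
/// constant operands fold away, so 2 + 3 + %x becomes 5 + %x, and repeated
/// operands become multiplies, so %x + %y + %y becomes %x + 2 * %y.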
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags OrigFlags,
                                        unsigned Depth) {
  assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
  unsigned NumPtrs = count_if(
      Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
  assert(NumPtrs <= 1 && "add has at most one pointer operand");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Delay expensive flag strengthening until necessary.
  auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
    return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
  };

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, ComputeFlags(Ops));

  if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) {
    // Don't strengthen flags if we have no new information.
    SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
    if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
      Add->setNoWrapFlags(ComputeFlags(Ops));
    return S;
  }

  // Okay, check to see if the same value occurs in the operand list more than
  // once. If so, merge them together into a multiply expression. Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, OrigFlags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. E.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  if (Ops.size() == 2) {
    // Check if we have an expression of the form ((X + C1) - C2), where C1 and
    // C2 can be folded in a way that allows retaining wrapping flags of (X +
    // C1).
    const SCEV *A = Ops[0];
    const SCEV *B = Ops[1];
    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
    auto *C = dyn_cast<SCEVConstant>(A);
    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
      auto C2 = C->getAPInt();
      SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;

      APInt ConstAdd = C1 + C2;
      auto AddFlags = AddExpr->getNoWrapFlags();
      // Adding a smaller constant is NUW if the original AddExpr was NUW.
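      // (Illustrative example, not in the original comments: folding
      // (-2) + (5 + %x)<nuw> yields ConstAdd == 3, and 3 u<= 5, so the
      // result (3 + %x) may keep the <nuw> flag -- the constant only moved
      // down within the range already proven not to wrap.)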
      if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) &&
          ConstAdd.ule(C1)) {
        PreservedFlags =
            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
      }

      // Adding a constant with the same sign and no greater magnitude is NSW,
      // if the original AddExpr was NSW.
      if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) &&
          C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
          ConstAdd.abs().ule(C1.abs())) {
        PreservedFlags =
            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
      }

      if (PreservedFlags != SCEV::FlagAnyWrap) {
        SmallVector<const SCEV *, 4> NewOps(AddExpr->operands());
        NewOps[0] = getConstant(ConstAdd);
        return getAddExpr(NewOps, PreservedFlags);
      }
    }
  }

  // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
  if (Ops.size() == 2) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]);
    if (Mul && Mul->getNumOperands() == 2 &&
        Mul->getOperand(0)->isAllOnesValue()) {
      const SCEV *X;
      const SCEV *Y;
      if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) {
        return getMulExpr(Y, getUDivExpr(X, Y));
      }
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands, they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    // If the original flags and all inlined SCEVAddExprs are NUW, use the
    // common NUW flag for the expression after inlining. Other flags cannot be
    // preserved, because they may depend on the original order of operations.
    SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
      CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted. Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, CommonFlags, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
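      // (For instance -- an illustrative case, not from the original
      // comments -- 2*%x + 3*%y + 2*%z regroups as 2*(%x + %z) + 3*%y,
      // emitting a single multiply per distinct scale.)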
2670 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2671 for (const SCEV *NewOp : NewOps) 2672 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2673 // Re-generate the operands list. 2674 Ops.clear(); 2675 if (AccumulatedConstant != 0) 2676 Ops.push_back(getConstant(AccumulatedConstant)); 2677 for (auto &MulOp : MulOpLists) { 2678 if (MulOp.first == 1) { 2679 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); 2680 } else if (MulOp.first != 0) { 2681 Ops.push_back(getMulExpr( 2682 getConstant(MulOp.first), 2683 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2684 SCEV::FlagAnyWrap, Depth + 1)); 2685 } 2686 } 2687 if (Ops.empty()) 2688 return getZero(Ty); 2689 if (Ops.size() == 1) 2690 return Ops[0]; 2691 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2692 } 2693 } 2694 2695 // If we are adding something to a multiply expression, make sure the 2696 // something is not already an operand of the multiply. If so, merge it into 2697 // the multiply. 2698 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2699 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2700 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2701 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2702 if (isa<SCEVConstant>(MulOpSCEV)) 2703 continue; 2704 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2705 if (MulOpSCEV == Ops[AddOp]) { 2706 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2707 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2708 if (Mul->getNumOperands() != 2) { 2709 // If the multiply has more than two operands, we must get the 2710 // Y*Z term. 2711 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2712 Mul->op_begin()+MulOp); 2713 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2714 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2715 } 2716 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2717 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2718 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2719 SCEV::FlagAnyWrap, Depth + 1); 2720 if (Ops.size() == 2) return OuterMul; 2721 if (AddOp < Idx) { 2722 Ops.erase(Ops.begin()+AddOp); 2723 Ops.erase(Ops.begin()+Idx-1); 2724 } else { 2725 Ops.erase(Ops.begin()+Idx); 2726 Ops.erase(Ops.begin()+AddOp-1); 2727 } 2728 Ops.push_back(OuterMul); 2729 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2730 } 2731 2732 // Check this multiply against other multiplies being added together. 2733 for (unsigned OtherMulIdx = Idx+1; 2734 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2735 ++OtherMulIdx) { 2736 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2737 // If MulOp occurs in OtherMul, we can fold the two multiplies 2738 // together. 
2739 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2740 OMulOp != e; ++OMulOp) 2741 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2742 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2743 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2744 if (Mul->getNumOperands() != 2) { 2745 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2746 Mul->op_begin()+MulOp); 2747 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2748 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2749 } 2750 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2751 if (OtherMul->getNumOperands() != 2) { 2752 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2753 OtherMul->op_begin()+OMulOp); 2754 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2755 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2756 } 2757 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2758 const SCEV *InnerMulSum = 2759 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2760 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2761 SCEV::FlagAnyWrap, Depth + 1); 2762 if (Ops.size() == 2) return OuterMul; 2763 Ops.erase(Ops.begin()+Idx); 2764 Ops.erase(Ops.begin()+OtherMulIdx-1); 2765 Ops.push_back(OuterMul); 2766 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2767 } 2768 } 2769 } 2770 } 2771 2772 // If there are any add recurrences in the operands list, see if any other 2773 // added values are loop invariant. If so, we can fold them into the 2774 // recurrence. 2775 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2776 ++Idx; 2777 2778 // Scan over all recurrences, trying to fold loop invariants into them. 2779 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2780 // Scan all of the other operands to this add and add them to the vector if 2781 // they are loop invariant w.r.t. the recurrence. 2782 SmallVector<const SCEV *, 8> LIOps; 2783 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2784 const Loop *AddRecLoop = AddRec->getLoop(); 2785 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2786 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2787 LIOps.push_back(Ops[i]); 2788 Ops.erase(Ops.begin()+i); 2789 --i; --e; 2790 } 2791 2792 // If we found some loop invariants, fold them into the recurrence. 2793 if (!LIOps.empty()) { 2794 // Compute nowrap flags for the addition of the loop-invariant ops and 2795 // the addrec. Temporarily push it as an operand for that purpose. These 2796 // flags are valid in the scope of the addrec only. 2797 LIOps.push_back(AddRec); 2798 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2799 LIOps.pop_back(); 2800 2801 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2802 LIOps.push_back(AddRec->getStart()); 2803 2804 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2805 2806 // It is not in general safe to propagate flags valid on an add within 2807 // the addrec scope to one outside it. We must prove that the inner 2808 // scope is guaranteed to execute if the outer one does to be able to 2809 // safely propagate. We know the program is undefined if poison is 2810 // produced on the inner scoped addrec. We also know that *for this use* 2811 // the outer scoped add can't overflow (because of the flags we just 2812 // computed for the inner scoped add) without the program being undefined. 
      // Proving that entry to the outer scope necessitates entry to the inner
      // scope thus proves the program undefined if the flags would be violated
      // in the outer scope.
      SCEV::NoWrapFlags AddFlags = Flags;
      if (AddFlags != SCEV::FlagAnyWrap) {
        auto *DefI = getDefiningScopeBound(LIOps);
        auto *ReachI = &*AddRecLoop->getHeader()->begin();
        if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
          AddFlags = SCEV::FlagAnyWrap;
      }
      AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRecs with the same loop induction variable
    // being added together. If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExprs to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
                 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
                 AddRec->getLoop()->getHeader()) &&
             "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
}

const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Ops);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    LoopUsers[L].push_back(S);
    registerUser(S, Ops);
  }
  setNoWrapFlags(S, Flags);
  return S;
}

const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVMulExpr *S =
      static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Ops);
  }
  S->setNoWrapFlags(Flags);
  return S;
}

static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient. If an
/// intermediate computation overflows, Overflow will be set and the return
/// will be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we multiply by the next term of the numerator and
  // divide by the next term of the denominator, taken in increasing order
  // (1, 2, ..., k). Each such division produces an integral result, which
  // helps reduce the chance of overflow in the intermediate computations.
  // However, we can still overflow even when the final result would fit.
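  // A quick worked instance (illustrative): Choose(6, 2) -- k stays 2 --
  // runs r = 1*6/1 = 6, then r = 6*5/2 = 15; each division is exact
  // because n(n-1)...(n-i+1) is always divisible by i!.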
2976
2977 if (n == 0 || n == k) return 1;
2978 if (k > n) return 0;
2979
2980 if (k > n/2)
2981 k = n-k;
2982
2983 uint64_t r = 1;
2984 for (uint64_t i = 1; i <= k; ++i) {
2985 r = umul_ov(r, n-(i-1), Overflow);
2986 r /= i;
2987 }
2988 return r;
2989 }
2990
2991 /// Determine if any of the operands in this SCEV are a constant or if
2992 /// any of the add or multiply expressions in this SCEV contain a constant.
2993 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2994 struct FindConstantInAddMulChain {
2995 bool FoundConstant = false;
2996
2997 bool follow(const SCEV *S) {
2998 FoundConstant |= isa<SCEVConstant>(S);
2999 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3000 }
3001
3002 bool isDone() const {
3003 return FoundConstant;
3004 }
3005 };
3006
3007 FindConstantInAddMulChain F;
3008 SCEVTraversal<FindConstantInAddMulChain> ST(F);
3009 ST.visitAll(StartExpr);
3010 return F.FoundConstant;
3011 }
3012
3013 /// Get a canonical multiply expression, or something simpler if possible.
3014 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3015 SCEV::NoWrapFlags OrigFlags,
3016 unsigned Depth) {
3017 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3018 "only nuw or nsw allowed");
3019 assert(!Ops.empty() && "Cannot get empty mul!");
3020 if (Ops.size() == 1) return Ops[0];
3021 #ifndef NDEBUG
3022 Type *ETy = Ops[0]->getType();
3023 assert(!ETy->isPointerTy());
3024 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3025 assert(Ops[i]->getType() == ETy &&
3026 "SCEVMulExpr operand types don't match!");
3027 #endif
3028
3029 // Sort by complexity, this groups all similar expression types together.
3030 GroupByComplexity(Ops, &LI, DT);
3031
3032 // If there are any constants, fold them together.
3033 unsigned Idx = 0;
3034 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3035 ++Idx;
3036 assert(Idx < Ops.size());
3037 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3038 // We found two constants, fold them together!
3039 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3040 if (Ops.size() == 2) return Ops[0];
3041 Ops.erase(Ops.begin()+1); // Erase the folded element
3042 LHSC = cast<SCEVConstant>(Ops[0]);
3043 }
3044
3045 // If we have a multiply of zero, it will always be zero.
3046 if (LHSC->getValue()->isZero())
3047 return LHSC;
3048
3049 // If we are left with a constant one being multiplied, strip it off.
3050 if (LHSC->getValue()->isOne()) {
3051 Ops.erase(Ops.begin());
3052 --Idx;
3053 }
3054
3055 if (Ops.size() == 1)
3056 return Ops[0];
3057 }
3058
3059 // Delay expensive flag strengthening until necessary.
3060 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3061 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3062 };
3063
3064 // Limit the depth of recursive calls.
3065 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3066 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3067
3068 if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3069 // Don't strengthen flags if we have no new information.
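// (Hypothetical example: a cached (%a * %b) node originally built with no
// flags can be upgraded here to carry nuw when a later query supplies
// OrigFlags = FlagNUW and strengthening confirms it.)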
3070 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3071 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3072 Mul->setNoWrapFlags(ComputeFlags(Ops));
3073 return S;
3074 }
3075
3076 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3077 if (Ops.size() == 2) {
3078 // C1*(C2+V) -> C1*C2 + C1*V
3079 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3080 // If any of Add's ops are Adds or Muls with a constant, apply this
3081 // transformation as well.
3082 //
3083 // TODO: There are some cases where this transformation is not
3084 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3085 // this transformation should be narrowed down.
3086 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
3087 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
3088 SCEV::FlagAnyWrap, Depth + 1),
3089 getMulExpr(LHSC, Add->getOperand(1),
3090 SCEV::FlagAnyWrap, Depth + 1),
3091 SCEV::FlagAnyWrap, Depth + 1);
3092
3093 if (Ops[0]->isAllOnesValue()) {
3094 // If we have a mul by -1 of an add, try distributing the -1 among the
3095 // add operands.
3096 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3097 SmallVector<const SCEV *, 4> NewOps;
3098 bool AnyFolded = false;
3099 for (const SCEV *AddOp : Add->operands()) {
3100 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3101 Depth + 1);
3102 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3103 NewOps.push_back(Mul);
3104 }
3105 if (AnyFolded)
3106 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3107 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3108 // Negation preserves a recurrence's no self-wrap property.
3109 SmallVector<const SCEV *, 4> Operands;
3110 for (const SCEV *AddRecOp : AddRec->operands())
3111 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3112 Depth + 1));
3113
3114 return getAddRecExpr(Operands, AddRec->getLoop(),
3115 AddRec->getNoWrapFlags(SCEV::FlagNW));
3116 }
3117 }
3118 }
3119 }
3120
3121 // Skip over the add expressions until we get to a multiply.
3122 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3123 ++Idx;
3124
3125 // If there are mul operands, inline them all into this expression.
3126 if (Idx < Ops.size()) {
3127 bool DeletedMul = false;
3128 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3129 if (Ops.size() > MulOpsInlineThreshold)
3130 break;
3131 // If we have a mul, expand the mul operands onto the end of the
3132 // operands list.
3133 Ops.erase(Ops.begin()+Idx);
3134 Ops.append(Mul->op_begin(), Mul->op_end());
3135 DeletedMul = true;
3136 }
3137
3138 // If we deleted at least one mul, we added operands to the end of the
3139 // list, and they are not necessarily sorted. Recurse to re-sort and
3140 // re-simplify any operands we just acquired.
3141 if (DeletedMul)
3142 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3143 }
3144
3145 // If there are any add recurrences in the operands list, see if any other
3146 // multiplied values are loop invariant. If so, we can fold them into the
3147 // recurrence.
3148 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3149 ++Idx;
3150
3151 // Scan over all recurrences, trying to fold loop invariants into them.
3152 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3153 // Scan all of the other operands to this mul and add them to the vector
3154 // if they are loop invariant w.r.t. the recurrence.
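// E.g. for (%inv * {0,+,%step}<L>) (illustrative), %inv is available at L's
// entry, so it lands in LIOps and is folded into the recurrence below.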
3155 SmallVector<const SCEV *, 8> LIOps; 3156 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3157 const Loop *AddRecLoop = AddRec->getLoop(); 3158 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3159 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3160 LIOps.push_back(Ops[i]); 3161 Ops.erase(Ops.begin()+i); 3162 --i; --e; 3163 } 3164 3165 // If we found some loop invariants, fold them into the recurrence. 3166 if (!LIOps.empty()) { 3167 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3168 SmallVector<const SCEV *, 4> NewOps; 3169 NewOps.reserve(AddRec->getNumOperands()); 3170 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3171 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3172 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3173 SCEV::FlagAnyWrap, Depth + 1)); 3174 3175 // Build the new addrec. Propagate the NUW and NSW flags if both the 3176 // outer mul and the inner addrec are guaranteed to have no overflow. 3177 // 3178 // No self-wrap cannot be guaranteed after changing the step size, but 3179 // will be inferred if either NUW or NSW is true. 3180 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 3181 const SCEV *NewRec = getAddRecExpr( 3182 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 3183 3184 // If all of the other operands were loop invariant, we are done. 3185 if (Ops.size() == 1) return NewRec; 3186 3187 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3188 for (unsigned i = 0;; ++i) 3189 if (Ops[i] == AddRec) { 3190 Ops[i] = NewRec; 3191 break; 3192 } 3193 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3194 } 3195 3196 // Okay, if there weren't any loop invariants to be folded, check to see 3197 // if there are multiple AddRec's with the same loop induction variable 3198 // being multiplied together. If so, we can fold them. 3199 3200 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3201 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3202 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3203 // ]]],+,...up to x=2n}. 3204 // Note that the arguments to choose() are always integers with values 3205 // known at compile time, never SCEV objects. 3206 // 3207 // The implementation avoids pointless extra computations when the two 3208 // addrec's are of different length (mathematically, it's equivalent to 3209 // an infinite stream of zeros on the right). 3210 bool OpsModified = false; 3211 for (unsigned OtherIdx = Idx+1; 3212 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3213 ++OtherIdx) { 3214 const SCEVAddRecExpr *OtherAddRec = 3215 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3216 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3217 continue; 3218 3219 // Limit max number of arguments to avoid creation of unreasonably big 3220 // SCEVAddRecs with very complex operands. 
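// As a concrete instance of the formula above (illustrative):
//   {0,+,1}<L> * {0,+,1}<L> == {0,+,1,+,2}<L>, the chrec for i*i,
// since 0 + 1*C(i,1) + 2*C(i,2) == i + i*(i-1) == i*i. Each extra factor
// lengthens the result, hence the operand bound checked next.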
3221 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3222 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3223 continue;
3224
3225 bool Overflow = false;
3226 Type *Ty = AddRec->getType();
3227 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3228 SmallVector<const SCEV*, 7> AddRecOps;
3229 for (int x = 0, xe = AddRec->getNumOperands() +
3230 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3231 SmallVector <const SCEV *, 7> SumOps;
3232 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3233 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3234 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3235 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3236 z < ze && !Overflow; ++z) {
3237 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3238 uint64_t Coeff;
3239 if (LargerThan64Bits)
3240 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3241 else
3242 Coeff = Coeff1*Coeff2;
3243 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3244 const SCEV *Term1 = AddRec->getOperand(y-z);
3245 const SCEV *Term2 = OtherAddRec->getOperand(z);
3246 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3247 SCEV::FlagAnyWrap, Depth + 1));
3248 }
3249 }
3250 if (SumOps.empty())
3251 SumOps.push_back(getZero(Ty));
3252 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3253 }
3254 if (!Overflow) {
3255 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3256 SCEV::FlagAnyWrap);
3257 if (Ops.size() == 2) return NewAddRec;
3258 Ops[Idx] = NewAddRec;
3259 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3260 OpsModified = true;
3261 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3262 if (!AddRec)
3263 break;
3264 }
3265 }
3266 if (OpsModified)
3267 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3268
3269 // Otherwise couldn't fold anything into this recurrence. Move on to the
3270 // next one.
3271 }
3272
3273 // Okay, it looks like we really DO need a mul expr. Check to see if we
3274 // already have one, otherwise create a new one.
3275 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3276 }
3277
3278 /// Represents an unsigned remainder expression based on unsigned division.
3279 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3280 const SCEV *RHS) {
3281 assert(getEffectiveSCEVType(LHS->getType()) ==
3282 getEffectiveSCEVType(RHS->getType()) &&
3283 "SCEVURemExpr operand types don't match!");
3284
3285 // Short-circuit easy cases.
3286 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3287 // If the constant is one, the result is trivial.
3288 if (RHSC->getValue()->isOne())
3289 return getZero(LHS->getType()); // X urem 1 --> 0
3290
3291 // If the constant is a power of two, fold into a zext(trunc(LHS)).
3292 if (RHSC->getAPInt().isPowerOf2()) {
3293 Type *FullTy = LHS->getType();
3294 Type *TruncTy =
3295 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3296 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3297 }
3298 }
3299
3300 // Fall back to %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
3301 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3302 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3303 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3304 }
3305
3306 /// Get a canonical unsigned division expression, or something simpler if
3307 /// possible.
3308 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3309 const SCEV *RHS) {
3310 assert(!LHS->getType()->isPointerTy() &&
3311 "SCEVUDivExpr operand can't be pointer!");
3312 assert(LHS->getType() == RHS->getType() &&
3313 "SCEVUDivExpr operand types don't match!");
3314
3315 FoldingSetNodeID ID;
3316 ID.AddInteger(scUDivExpr);
3317 ID.AddPointer(LHS);
3318 ID.AddPointer(RHS);
3319 void *IP = nullptr;
3320 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3321 return S;
3322
3323 // 0 udiv Y == 0
3324 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3325 if (LHSC->getValue()->isZero())
3326 return LHS;
3327
3328 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3329 if (RHSC->getValue()->isOne())
3330 return LHS; // X udiv 1 --> X
3331 // If the denominator is zero, the result of the udiv is undefined. Don't
3332 // try to analyze it, because the resolution chosen here may differ from
3333 // the resolution chosen in other parts of the compiler.
3334 if (!RHSC->getValue()->isZero()) {
3335 // Determine if the division can be folded into the operands of
3336 // the LHS expression.
3337 // TODO: Generalize this to non-constants by using known-bits information.
3338 Type *Ty = LHS->getType();
3339 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3340 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3341 // For non-power-of-two values, effectively round the value up to the
3342 // nearest power of two.
3343 if (!RHSC->getAPInt().isPowerOf2())
3344 ++MaxShiftAmt;
3345 IntegerType *ExtTy =
3346 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3347 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3348 if (const SCEVConstant *Step =
3349 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3350 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3351 const APInt &StepInt = Step->getAPInt();
3352 const APInt &DivInt = RHSC->getAPInt();
3353 if (!StepInt.urem(DivInt) &&
3354 getZeroExtendExpr(AR, ExtTy) ==
3355 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3356 getZeroExtendExpr(Step, ExtTy),
3357 AR->getLoop(), SCEV::FlagAnyWrap)) {
3358 SmallVector<const SCEV *, 4> Operands;
3359 for (const SCEV *Op : AR->operands())
3360 Operands.push_back(getUDivExpr(Op, RHS));
3361 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3362 }
3363 // Get a canonical UDivExpr for a recurrence:
3364 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3365 // We can currently only fold X%N if X is constant.
3366 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3367 if (StartC && !DivInt.urem(StepInt) &&
3368 getZeroExtendExpr(AR, ExtTy) ==
3369 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3370 getZeroExtendExpr(Step, ExtTy),
3371 AR->getLoop(), SCEV::FlagAnyWrap)) {
3372 const APInt &StartInt = StartC->getAPInt();
3373 const APInt &StartRem = StartInt.urem(StepInt);
3374 if (StartRem != 0) {
3375 const SCEV *NewLHS =
3376 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3377 AR->getLoop(), SCEV::FlagNW);
3378 if (LHS != NewLHS) {
3379 LHS = NewLHS;
3380
3381 // Reset the ID to include the new LHS, and check if it is
3382 // already cached.
3383 ID.clear();
3384 ID.AddInteger(scUDivExpr);
3385 ID.AddPointer(LHS);
3386 ID.AddPointer(RHS);
3387 IP = nullptr;
3388 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3389 return S;
3390 }
3391 }
3392 }
3393 }
3394 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
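// E.g. (4 * %x) /u 2 folds to (2 * %x) once the zero-extension check below
// shows the multiply cannot spill bits out of the divided range
// (illustrative example).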
3395 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3396 SmallVector<const SCEV *, 4> Operands; 3397 for (const SCEV *Op : M->operands()) 3398 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3399 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3400 // Find an operand that's safely divisible. 3401 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3402 const SCEV *Op = M->getOperand(i); 3403 const SCEV *Div = getUDivExpr(Op, RHSC); 3404 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3405 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3406 Operands[i] = Div; 3407 return getMulExpr(Operands); 3408 } 3409 } 3410 } 3411 3412 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3413 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3414 if (auto *DivisorConstant = 3415 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3416 bool Overflow = false; 3417 APInt NewRHS = 3418 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3419 if (Overflow) { 3420 return getConstant(RHSC->getType(), 0, false); 3421 } 3422 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3423 } 3424 } 3425 3426 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3427 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3428 SmallVector<const SCEV *, 4> Operands; 3429 for (const SCEV *Op : A->operands()) 3430 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3431 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3432 Operands.clear(); 3433 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3434 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3435 if (isa<SCEVUDivExpr>(Op) || 3436 getMulExpr(Op, RHS) != A->getOperand(i)) 3437 break; 3438 Operands.push_back(Op); 3439 } 3440 if (Operands.size() == A->getNumOperands()) 3441 return getAddExpr(Operands); 3442 } 3443 } 3444 3445 // Fold if both operands are constant. 3446 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3447 Constant *LHSCV = LHSC->getValue(); 3448 Constant *RHSCV = RHSC->getValue(); 3449 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3450 RHSCV))); 3451 } 3452 } 3453 } 3454 3455 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3456 // changes). Make sure we get a new one. 3457 IP = nullptr; 3458 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3459 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3460 LHS, RHS); 3461 UniqueSCEVs.InsertNode(S, IP); 3462 registerUser(S, {LHS, RHS}); 3463 return S; 3464 } 3465 3466 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3467 APInt A = C1->getAPInt().abs(); 3468 APInt B = C2->getAPInt().abs(); 3469 uint32_t ABW = A.getBitWidth(); 3470 uint32_t BBW = B.getBitWidth(); 3471 3472 if (ABW > BBW) 3473 B = B.zext(ABW); 3474 else if (ABW < BBW) 3475 A = A.zext(BBW); 3476 3477 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3478 } 3479 3480 /// Get a canonical unsigned division expression, or something simpler if 3481 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3482 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3483 /// it's not exact because the udiv may be clearing bits. 3484 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3485 const SCEV *RHS) { 3486 // TODO: we could try to find factors in all sorts of things, but for now we 3487 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the
3488 // end of this file for inspiration.
3489
3490 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3491 if (!Mul || !Mul->hasNoUnsignedWrap())
3492 return getUDivExpr(LHS, RHS);
3493
3494 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3495 // If the mulexpr multiplies by a constant, then that constant must be the
3496 // first element of the mulexpr.
3497 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3498 if (LHSCst == RHSCst) {
3499 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3500 return getMulExpr(Operands);
3501 }
3502
3503 // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3504 // that there's a factor provided by one of the other terms. We need to
3505 // check.
3506 APInt Factor = gcd(LHSCst, RHSCst);
3507 if (!Factor.isIntN(1)) {
3508 LHSCst =
3509 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3510 RHSCst =
3511 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3512 SmallVector<const SCEV *, 2> Operands;
3513 Operands.push_back(LHSCst);
3514 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3515 LHS = getMulExpr(Operands);
3516 RHS = RHSCst;
3517 Mul = dyn_cast<SCEVMulExpr>(LHS);
3518 if (!Mul)
3519 return getUDivExactExpr(LHS, RHS);
3520 }
3521 }
3522 }
3523
3524 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3525 if (Mul->getOperand(i) == RHS) {
3526 SmallVector<const SCEV *, 2> Operands;
3527 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3528 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3529 return getMulExpr(Operands);
3530 }
3531 }
3532
3533 return getUDivExpr(LHS, RHS);
3534 }
3535
3536 /// Get an add recurrence expression for the specified loop. Simplify the
3537 /// expression as much as possible.
3538 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3539 const Loop *L,
3540 SCEV::NoWrapFlags Flags) {
3541 SmallVector<const SCEV *, 4> Operands;
3542 Operands.push_back(Start);
3543 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3544 if (StepChrec->getLoop() == L) {
3545 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3546 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3547 }
3548
3549 Operands.push_back(Step);
3550 return getAddRecExpr(Operands, L, Flags);
3551 }
3552
3553 /// Get an add recurrence expression for the specified loop. Simplify the
3554 /// expression as much as possible.
3555 const SCEV *
3556 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3557 const Loop *L, SCEV::NoWrapFlags Flags) {
3558 if (Operands.size() == 1) return Operands[0];
3559 #ifndef NDEBUG
3560 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3561 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3562 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3563 "SCEVAddRecExpr operand types don't match!");
3564 assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3565 }
3566 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3567 assert(isLoopInvariant(Operands[i], L) &&
3568 "SCEVAddRecExpr operand is not loop-invariant!");
3569 #endif
3570
3571 if (Operands.back()->isZero()) {
3572 Operands.pop_back();
3573 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3574 }
3575
3576 // It's tempting to call getConstantMaxBackedgeTakenCount here and
3577 // use that information to infer NUW and NSW flags.
However, computing a
3578 // BE count requires calling getAddRecExpr, so we may not yet have a
3579 // meaningful BE count at this point (and if we don't, we'd be stuck
3580 // with a SCEVCouldNotCompute as the cached BE count).
3581
3582 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3583
3584 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3585 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3586 const Loop *NestedLoop = NestedAR->getLoop();
3587 if (L->contains(NestedLoop)
3588 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3589 : (!NestedLoop->contains(L) &&
3590 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3591 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3592 Operands[0] = NestedAR->getStart();
3593 // AddRecs require their operands be loop-invariant with respect to their
3594 // loops. Don't perform this transformation if it would break this
3595 // requirement.
3596 bool AllInvariant = all_of(
3597 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3598
3599 if (AllInvariant) {
3600 // Create a recurrence for the outer loop with the same step size.
3601 //
3602 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3603 // inner recurrence has the same property.
3604 SCEV::NoWrapFlags OuterFlags =
3605 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3606
3607 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3608 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3609 return isLoopInvariant(Op, NestedLoop);
3610 });
3611
3612 if (AllInvariant) {
3613 // Ok, both add recurrences are valid after the transformation.
3614 //
3615 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3616 // the outer recurrence has the same property.
3617 SCEV::NoWrapFlags InnerFlags =
3618 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3619 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3620 }
3621 }
3622 // Reset Operands to its original state.
3623 Operands[0] = NestedAR;
3624 }
3625 }
3626
3627 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3628 // already have one, otherwise create a new one.
3629 return getOrCreateAddRecExpr(Operands, L, Flags);
3630 }
3631
3632 const SCEV *
3633 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3634 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3635 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3636 // getSCEV(Base)->getType() has the same address space as Base->getType()
3637 // because SCEV::getType() preserves the address space.
3638 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3639 const bool AssumeInBoundsFlags = [&]() {
3640 if (!GEP->isInBounds())
3641 return false;
3642
3643 // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
3644 // but to do that, we have to ensure that said flag is valid in the entire
3645 // defined scope of the SCEV.
3646 auto *GEPI = dyn_cast<Instruction>(GEP);
3647 // TODO: non-instructions have global scope. We might be able to prove
3648 // some global scope cases.
3649 return GEPI && isSCEVExprNeverPoison(GEPI);
3650 }();
3651
3652 SCEV::NoWrapFlags OffsetWrap =
3653 AssumeInBoundsFlags ?
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3654 3655 Type *CurTy = GEP->getType(); 3656 bool FirstIter = true; 3657 SmallVector<const SCEV *, 4> Offsets; 3658 for (const SCEV *IndexExpr : IndexExprs) { 3659 // Compute the (potentially symbolic) offset in bytes for this index. 3660 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3661 // For a struct, add the member offset. 3662 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3663 unsigned FieldNo = Index->getZExtValue(); 3664 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3665 Offsets.push_back(FieldOffset); 3666 3667 // Update CurTy to the type of the field at Index. 3668 CurTy = STy->getTypeAtIndex(Index); 3669 } else { 3670 // Update CurTy to its element type. 3671 if (FirstIter) { 3672 assert(isa<PointerType>(CurTy) && 3673 "The first index of a GEP indexes a pointer"); 3674 CurTy = GEP->getSourceElementType(); 3675 FirstIter = false; 3676 } else { 3677 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3678 } 3679 // For an array, add the element offset, explicitly scaled. 3680 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3681 // Getelementptr indices are signed. 3682 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3683 3684 // Multiply the index by the element size to compute the element offset. 3685 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3686 Offsets.push_back(LocalOffset); 3687 } 3688 } 3689 3690 // Handle degenerate case of GEP without offsets. 3691 if (Offsets.empty()) 3692 return BaseExpr; 3693 3694 // Add the offsets together, assuming nsw if inbounds. 3695 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3696 // Add the base address and the offset. We cannot use the nsw flag, as the 3697 // base address is unsigned. However, if we know that the offset is 3698 // non-negative, we can use nuw. 3699 SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) 3700 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3701 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); 3702 assert(BaseExpr->getType() == GEPExpr->getType() && 3703 "GEP should not change type mid-flight."); 3704 return GEPExpr; 3705 } 3706 3707 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3708 ArrayRef<const SCEV *> Ops) { 3709 FoldingSetNodeID ID; 3710 ID.AddInteger(SCEVType); 3711 for (const SCEV *Op : Ops) 3712 ID.AddPointer(Op); 3713 void *IP = nullptr; 3714 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3715 } 3716 3717 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3718 SCEV::NoWrapFlags Flags = IsNSW ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3719 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3720 } 3721 3722 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3723 SmallVectorImpl<const SCEV *> &Ops) { 3724 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3725 if (Ops.size() == 1) return Ops[0]; 3726 #ifndef NDEBUG 3727 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3728 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 3729 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3730 "Operand types don't match!"); 3731 assert(Ops[0]->getType()->isPointerTy() == 3732 Ops[i]->getType()->isPointerTy() && 3733 "min/max should be consistently pointerish"); 3734 } 3735 #endif 3736 3737 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3738 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3739 3740 // Sort by complexity, this groups all similar expression types together. 3741 GroupByComplexity(Ops, &LI, DT); 3742 3743 // Check if we have created the same expression before. 3744 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { 3745 return S; 3746 } 3747 3748 // If there are any constants, fold them together. 3749 unsigned Idx = 0; 3750 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3751 ++Idx; 3752 assert(Idx < Ops.size()); 3753 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3754 if (Kind == scSMaxExpr) 3755 return APIntOps::smax(LHS, RHS); 3756 else if (Kind == scSMinExpr) 3757 return APIntOps::smin(LHS, RHS); 3758 else if (Kind == scUMaxExpr) 3759 return APIntOps::umax(LHS, RHS); 3760 else if (Kind == scUMinExpr) 3761 return APIntOps::umin(LHS, RHS); 3762 llvm_unreachable("Unknown SCEV min/max opcode"); 3763 }; 3764 3765 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3766 // We found two constants, fold them together! 3767 ConstantInt *Fold = ConstantInt::get( 3768 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3769 Ops[0] = getConstant(Fold); 3770 Ops.erase(Ops.begin()+1); // Erase the folded element 3771 if (Ops.size() == 1) return Ops[0]; 3772 LHSC = cast<SCEVConstant>(Ops[0]); 3773 } 3774 3775 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3776 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3777 3778 if (IsMax ? IsMinV : IsMaxV) { 3779 // If we are left with a constant minimum(/maximum)-int, strip it off. 3780 Ops.erase(Ops.begin()); 3781 --Idx; 3782 } else if (IsMax ? IsMaxV : IsMinV) { 3783 // If we have a max(/min) with a constant maximum(/minimum)-int, 3784 // it will always be the extremum. 3785 return LHSC; 3786 } 3787 3788 if (Ops.size() == 1) return Ops[0]; 3789 } 3790 3791 // Find the first operation of the same kind 3792 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3793 ++Idx; 3794 3795 // Check to see if one of the operands is of the same kind. If so, expand its 3796 // operands onto our operand list, and recurse to simplify. 3797 if (Idx < Ops.size()) { 3798 bool DeletedAny = false; 3799 while (Ops[Idx]->getSCEVType() == Kind) { 3800 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3801 Ops.erase(Ops.begin()+Idx); 3802 Ops.append(SMME->op_begin(), SMME->op_end()); 3803 DeletedAny = true; 3804 } 3805 3806 if (DeletedAny) 3807 return getMinMaxExpr(Kind, Ops); 3808 } 3809 3810 // Okay, check to see if the same value occurs in the operand list twice. If 3811 // so, delete one. Since we sorted the list, these values are required to 3812 // be adjacent. 3813 llvm::CmpInst::Predicate GEPred = 3814 IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3815 llvm::CmpInst::Predicate LEPred =
3816 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3817 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3818 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3819 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3820 if (Ops[i] == Ops[i + 1] ||
3821 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3822 // X op Y op Y --> X op Y
3823 // X op Y --> X, if we know X, Y are ordered appropriately
3824 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3825 --i;
3826 --e;
3827 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3828 Ops[i + 1])) {
3829 // X op Y --> Y, if we know X, Y are ordered appropriately
3830 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3831 --i;
3832 --e;
3833 }
3834 }
3835
3836 if (Ops.size() == 1) return Ops[0];
3837
3838 assert(!Ops.empty() && "Reduced smax down to nothing!");
3839
3840 // Okay, it looks like we really DO need an expr. Check to see if we
3841 // already have one, otherwise create a new one.
3842 FoldingSetNodeID ID;
3843 ID.AddInteger(Kind);
3844 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3845 ID.AddPointer(Ops[i]);
3846 void *IP = nullptr;
3847 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
3848 if (ExistingSCEV)
3849 return ExistingSCEV;
3850 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3851 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3852 SCEV *S = new (SCEVAllocator)
3853 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3854
3855 UniqueSCEVs.InsertNode(S, IP);
3856 registerUser(S, Ops);
3857 return S;
3858 }
3859
3860 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3861 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3862 return getSMaxExpr(Ops);
3863 }
3864
3865 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3866 return getMinMaxExpr(scSMaxExpr, Ops);
3867 }
3868
3869 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3870 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3871 return getUMaxExpr(Ops);
3872 }
3873
3874 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3875 return getMinMaxExpr(scUMaxExpr, Ops);
3876 }
3877
3878 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3879 const SCEV *RHS) {
3880 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3881 return getSMinExpr(Ops);
3882 }
3883
3884 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3885 return getMinMaxExpr(scSMinExpr, Ops);
3886 }
3887
3888 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3889 const SCEV *RHS) {
3890 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3891 return getUMinExpr(Ops);
3892 }
3893
3894 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3895 return getMinMaxExpr(scUMinExpr, Ops);
3896 }
3897
3898 const SCEV *
3899 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3900 ScalableVectorType *ScalableTy) {
3901 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3902 Constant *One = ConstantInt::get(IntTy, 1);
3903 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3904 // Note that the expression we created is the final expression; we don't
3905 // want to simplify it any further. Also, if we call a normal getSCEV(),
3906 // we'll end up in an endless recursion. So just create an SCEVUnknown.
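// The constant built here is the classic "sizeof" GEP idiom, roughly
// (sketch): ptrtoint(getelementptr(<vscale x 4 x i32>, null, i32 1)),
// whose value is a multiple of vscale and is known only at runtime.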
3907 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy)); 3908 } 3909 3910 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { 3911 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy)) 3912 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy); 3913 // We can bypass creating a target-independent constant expression and then 3914 // folding it back into a ConstantInt. This is just a compile-time 3915 // optimization. 3916 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy)); 3917 } 3918 3919 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) { 3920 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy)) 3921 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy); 3922 // We can bypass creating a target-independent constant expression and then 3923 // folding it back into a ConstantInt. This is just a compile-time 3924 // optimization. 3925 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); 3926 } 3927 3928 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3929 StructType *STy, 3930 unsigned FieldNo) { 3931 // We can bypass creating a target-independent constant expression and then 3932 // folding it back into a ConstantInt. This is just a compile-time 3933 // optimization. 3934 return getConstant( 3935 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3936 } 3937 3938 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3939 // Don't attempt to do anything other than create a SCEVUnknown object 3940 // here. createSCEV only calls getUnknown after checking for all other 3941 // interesting possibilities, and any other code that calls getUnknown 3942 // is doing so in order to hide a value from SCEV canonicalization. 3943 3944 FoldingSetNodeID ID; 3945 ID.AddInteger(scUnknown); 3946 ID.AddPointer(V); 3947 void *IP = nullptr; 3948 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3949 assert(cast<SCEVUnknown>(S)->getValue() == V && 3950 "Stale SCEVUnknown in uniquing map!"); 3951 return S; 3952 } 3953 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3954 FirstUnknown); 3955 FirstUnknown = cast<SCEVUnknown>(S); 3956 UniqueSCEVs.InsertNode(S, IP); 3957 return S; 3958 } 3959 3960 //===----------------------------------------------------------------------===// 3961 // Basic SCEV Analysis and PHI Idiom Recognition Code 3962 // 3963 3964 /// Test if values of the given type are analyzable within the SCEV 3965 /// framework. This primarily includes integer types, and it can optionally 3966 /// include pointer types if the ScalarEvolution class has access to 3967 /// target-specific information. 3968 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3969 // Integers and pointers are always SCEVable. 3970 return Ty->isIntOrPtrTy(); 3971 } 3972 3973 /// Return the size in bits of the specified type, for which isSCEVable must 3974 /// return true. 3975 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3976 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3977 if (Ty->isPointerTy()) 3978 return getDataLayout().getIndexTypeSizeInBits(Ty); 3979 return getDataLayout().getTypeSizeInBits(Ty); 3980 } 3981 3982 /// Return a type with the same bitwidth as the given type and which represents 3983 /// how SCEV will treat the given type, for which isSCEVable must return 3984 /// true. For pointer types, this is the pointer index sized integer type. 
3985 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3986 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3987
3988 if (Ty->isIntegerTy())
3989 return Ty;
3990
3991 // The only other supported type is pointer.
3992 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3993 return getDataLayout().getIndexType(Ty);
3994 }
3995
3996 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3997 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3998 }
3999
4000 bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4001 const SCEV *B) {
4002 // For a valid use point to exist, the defining scope of one operand
4003 // must dominate the other.
4004 bool PreciseA, PreciseB;
4005 auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4006 auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4007 if (!PreciseA || !PreciseB)
4008 // Can't tell.
4009 return false;
4010 return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4011 DT.dominates(ScopeB, ScopeA);
4012 }
4013
4014
4015 const SCEV *ScalarEvolution::getCouldNotCompute() {
4016 return CouldNotCompute.get();
4017 }
4018
4019 bool ScalarEvolution::checkValidity(const SCEV *S) const {
4020 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
4021 auto *SU = dyn_cast<SCEVUnknown>(S);
4022 return SU && SU->getValue() == nullptr;
4023 });
4024
4025 return !ContainsNulls;
4026 }
4027
4028 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
4029 HasRecMapType::iterator I = HasRecMap.find(S);
4030 if (I != HasRecMap.end())
4031 return I->second;
4032
4033 bool FoundAddRec =
4034 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
4035 HasRecMap.insert({S, FoundAddRec});
4036 return FoundAddRec;
4037 }
4038
4039 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
4040 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
4041 /// offset I, then return {S', I}, else return {\p S, nullptr}.
4042 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
4043 const auto *Add = dyn_cast<SCEVAddExpr>(S);
4044 if (!Add)
4045 return {S, nullptr};
4046
4047 if (Add->getNumOperands() != 2)
4048 return {S, nullptr};
4049
4050 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
4051 if (!ConstOp)
4052 return {S, nullptr};
4053
4054 return {Add->getOperand(1), ConstOp->getValue()};
4055 }
4056
4057 /// Return the ValueOffsetPair set for \p S. \p S can be represented
4058 /// by the value and offset from any ValueOffsetPair in the set.
4059 ScalarEvolution::ValueOffsetPairSetVector *
4060 ScalarEvolution::getSCEVValues(const SCEV *S) {
4061 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4062 if (SI == ExprValueMap.end())
4063 return nullptr;
4064 #ifndef NDEBUG
4065 if (VerifySCEVMap) {
4066 // Check there is no dangling Value in the set returned.
4067 for (const auto &VE : SI->second)
4068 assert(ValueExprMap.count(VE.first));
4069 }
4070 #endif
4071 return &SI->second;
4072 }
4073
4074 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4075 /// cannot be used separately. eraseValueFromMap should be used to remove
4076 /// V from ValueExprMap and ExprValueMap at the same time.
4077 void ScalarEvolution::eraseValueFromMap(Value *V) {
4078 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4079 if (I != ValueExprMap.end()) {
4080 const SCEV *S = I->second;
4081 // Remove {V, 0} from the set of ExprValueMap[S]
4082 if (auto *SV = getSCEVValues(S))
4083 SV->remove({V, nullptr});
4084
4085 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
4086 const SCEV *Stripped;
4087 ConstantInt *Offset;
4088 std::tie(Stripped, Offset) = splitAddExpr(S);
4089 if (Offset != nullptr) {
4090 if (auto *SV = getSCEVValues(Stripped))
4091 SV->remove({V, Offset});
4092 }
4093 ValueExprMap.erase(V);
4094 }
4095 }
4096
4097 void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
4098 // A recursive query may have already computed the SCEV. It should be
4099 // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
4100 // inferred nowrap flags.
4101 auto It = ValueExprMap.find_as(V);
4102 if (It == ValueExprMap.end()) {
4103 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4104 ExprValueMap[S].insert({V, nullptr});
4105 }
4106 }
4107
4108 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4109 /// create a new one.
4110 const SCEV *ScalarEvolution::getSCEV(Value *V) {
4111 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4112
4113 const SCEV *S = getExistingSCEV(V);
4114 if (S == nullptr) {
4115 S = createSCEV(V);
4116 // During PHI resolution, it is possible to create two SCEVs for the same
4117 // V, so we need to double-check whether V->S has been inserted into
4118 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
4119 std::pair<ValueExprMapType::iterator, bool> Pair =
4120 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4121 if (Pair.second) {
4122 ExprValueMap[S].insert({V, nullptr});
4123
4124 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
4125 // ExprValueMap.
4126 const SCEV *Stripped = S;
4127 ConstantInt *Offset = nullptr;
4128 std::tie(Stripped, Offset) = splitAddExpr(S);
4129 // If Stripped is a SCEVUnknown, don't bother to save
4130 // Stripped -> {V, offset}. It doesn't simplify and sometimes even
4131 // increases the complexity of the expansion code.
4132 // If V is a GetElementPtrInst, don't save Stripped -> {V, offset}
4133 // because it may generate add/sub instead of GEP in SCEV expansion.
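// Hypothetical example: if S == (4 + %x) for a value V, we record
// %x -> {V, 4}, so the expander can later materialize %x as (V - 4).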
4134 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 4135 !isa<GetElementPtrInst>(V)) 4136 ExprValueMap[Stripped].insert({V, Offset}); 4137 } 4138 } 4139 return S; 4140 } 4141 4142 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4143 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4144 4145 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4146 if (I != ValueExprMap.end()) { 4147 const SCEV *S = I->second; 4148 assert(checkValidity(S) && 4149 "existing SCEV has not been properly invalidated"); 4150 return S; 4151 } 4152 return nullptr; 4153 } 4154 4155 /// Return a SCEV corresponding to -V = -1*V 4156 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4157 SCEV::NoWrapFlags Flags) { 4158 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4159 return getConstant( 4160 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4161 4162 Type *Ty = V->getType(); 4163 Ty = getEffectiveSCEVType(Ty); 4164 return getMulExpr(V, getMinusOne(Ty), Flags); 4165 } 4166 4167 /// If Expr computes ~A, return A else return nullptr 4168 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4169 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4170 if (!Add || Add->getNumOperands() != 2 || 4171 !Add->getOperand(0)->isAllOnesValue()) 4172 return nullptr; 4173 4174 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4175 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4176 !AddRHS->getOperand(0)->isAllOnesValue()) 4177 return nullptr; 4178 4179 return AddRHS->getOperand(1); 4180 } 4181 4182 /// Return a SCEV corresponding to ~V = -1-V 4183 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4184 assert(!V->getType()->isPointerTy() && "Can't negate pointer"); 4185 4186 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4187 return getConstant( 4188 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4189 4190 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4191 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4192 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4193 SmallVector<const SCEV *, 2> MatchedOperands; 4194 for (const SCEV *Operand : MME->operands()) { 4195 const SCEV *Matched = MatchNotExpr(Operand); 4196 if (!Matched) 4197 return (const SCEV *)nullptr; 4198 MatchedOperands.push_back(Matched); 4199 } 4200 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4201 MatchedOperands); 4202 }; 4203 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4204 return Replaced; 4205 } 4206 4207 Type *Ty = V->getType(); 4208 Ty = getEffectiveSCEVType(Ty); 4209 return getMinusSCEV(getMinusOne(Ty), V); 4210 } 4211 4212 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { 4213 assert(P->getType()->isPointerTy()); 4214 4215 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { 4216 // The base of an AddRec is the first operand. 4217 SmallVector<const SCEV *> Ops{AddRec->operands()}; 4218 Ops[0] = removePointerBase(Ops[0]); 4219 // Don't try to transfer nowrap flags for now. We could in some cases 4220 // (for example, if pointer operand of the AddRec is a SCEVUnknown). 4221 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); 4222 } 4223 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { 4224 // The base of an Add is the pointer operand. 
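// E.g. removePointerBase((%p + 4 + %i)) yields (4 + %i): the pointer
// operand %p is itself a base and contributes zero (illustrative).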
4225 SmallVector<const SCEV *> Ops{Add->operands()}; 4226 const SCEV **PtrOp = nullptr; 4227 for (const SCEV *&AddOp : Ops) { 4228 if (AddOp->getType()->isPointerTy()) { 4229 assert(!PtrOp && "Cannot have multiple pointer ops"); 4230 PtrOp = &AddOp; 4231 } 4232 } 4233 *PtrOp = removePointerBase(*PtrOp); 4234 // Don't try to transfer nowrap flags for now. We could in some cases 4235 // (for example, if the pointer operand of the Add is a SCEVUnknown). 4236 return getAddExpr(Ops); 4237 } 4238 // Any other expression must be a pointer base. 4239 return getZero(P->getType()); 4240 } 4241 4242 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4243 SCEV::NoWrapFlags Flags, 4244 unsigned Depth) { 4245 // Fast path: X - X --> 0. 4246 if (LHS == RHS) 4247 return getZero(LHS->getType()); 4248 4249 // If we subtract two pointers with different pointer bases, bail. 4250 // Eventually, we're going to add an assertion to getMulExpr that we 4251 // can't multiply by a pointer. 4252 if (RHS->getType()->isPointerTy()) { 4253 if (!LHS->getType()->isPointerTy() || 4254 getPointerBase(LHS) != getPointerBase(RHS)) 4255 return getCouldNotCompute(); 4256 LHS = removePointerBase(LHS); 4257 RHS = removePointerBase(RHS); 4258 } 4259 4260 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4261 // makes it so that we cannot make much use of NUW. 4262 auto AddFlags = SCEV::FlagAnyWrap; 4263 const bool RHSIsNotMinSigned = 4264 !getSignedRangeMin(RHS).isMinSignedValue(); 4265 if (hasFlags(Flags, SCEV::FlagNSW)) { 4266 // Let M be the minimum representable signed value. Then (-1)*RHS 4267 // signed-wraps if and only if RHS is M. That can happen even for 4268 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4269 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4270 // (-1)*RHS, we need to prove that RHS != M. 4271 // 4272 // If LHS is non-negative and we know that LHS - RHS does not 4273 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap 4274 // either by proving that RHS > M or that LHS >= 0. 4275 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) { 4276 AddFlags = SCEV::FlagNSW; 4277 } 4278 } 4279 4280 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS - 4281 // RHS is NSW and LHS >= 0. 4282 // 4283 // The difficulty here is that the NSW flag may have been proven 4284 // relative to a loop that is to be found in a recurrence in LHS and 4285 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a 4286 // larger scope than intended. 4287 auto NegFlags = RHSIsNotMinSigned ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap;
4288
4289 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4290 }
4291
4292 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4293 unsigned Depth) {
4294 Type *SrcTy = V->getType();
4295 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4296 "Cannot truncate or zero extend with non-integer arguments!");
4297 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4298 return V; // No conversion
4299 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4300 return getTruncateExpr(V, Ty, Depth);
4301 return getZeroExtendExpr(V, Ty, Depth);
4302 }
4303
4304 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4305 unsigned Depth) {
4306 Type *SrcTy = V->getType();
4307 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4308 "Cannot truncate or sign extend with non-integer arguments!");
4309 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4310 return V; // No conversion
4311 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4312 return getTruncateExpr(V, Ty, Depth);
4313 return getSignExtendExpr(V, Ty, Depth);
4314 }
4315
4316 const SCEV *
4317 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4318 Type *SrcTy = V->getType();
4319 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4320 "Cannot noop or zero extend with non-integer arguments!");
4321 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4322 "getNoopOrZeroExtend cannot truncate!");
4323 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4324 return V; // No conversion
4325 return getZeroExtendExpr(V, Ty);
4326 }
4327
4328 const SCEV *
4329 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4330 Type *SrcTy = V->getType();
4331 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4332 "Cannot noop or sign extend with non-integer arguments!");
4333 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4334 "getNoopOrSignExtend cannot truncate!");
4335 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4336 return V; // No conversion
4337 return getSignExtendExpr(V, Ty);
4338 }
4339
4340 const SCEV *
4341 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4342 Type *SrcTy = V->getType();
4343 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4344 "Cannot noop or any extend with non-integer arguments!");
4345 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4346 "getNoopOrAnyExtend cannot truncate!");
4347 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4348 return V; // No conversion
4349 return getAnyExtendExpr(V, Ty);
4350 }
4351
4352 const SCEV *
4353 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4354 Type *SrcTy = V->getType();
4355 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4356 "Cannot truncate or noop with non-integer arguments!");
4357 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4358 "getTruncateOrNoop cannot extend!");
4359 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4360 return V; // No conversion
4361 return getTruncateExpr(V, Ty);
4362 }
4363
4364 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4365 const SCEV *RHS) {
4366 const SCEV *PromotedLHS = LHS;
4367 const SCEV *PromotedRHS = RHS;
4368
4369 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4370 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4371 else
4372 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4373
4374 return
getUMaxExpr(PromotedLHS, PromotedRHS);
4375 }
4376
4377 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4378 const SCEV *RHS) {
4379 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4380 return getUMinFromMismatchedTypes(Ops);
4381 }
4382
4383 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4384 SmallVectorImpl<const SCEV *> &Ops) {
4385 assert(!Ops.empty() && "At least one operand must be present!");
4386 // Trivial case.
4387 if (Ops.size() == 1)
4388 return Ops[0];
4389
4390 // Find the max type first.
4391 Type *MaxType = nullptr;
4392 for (auto *S : Ops)
4393 if (MaxType)
4394 MaxType = getWiderType(MaxType, S->getType());
4395 else
4396 MaxType = S->getType();
4397 assert(MaxType && "Failed to find maximum type!");
4398
4399 // Extend all ops to max type.
4400 SmallVector<const SCEV *, 2> PromotedOps;
4401 for (auto *S : Ops)
4402 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4403
4404 // Generate umin.
4405 return getUMinExpr(PromotedOps);
4406 }
4407
4408 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4409 // A pointer operand may evaluate to a nonpointer expression, such as null.
4410 if (!V->getType()->isPointerTy())
4411 return V;
4412
4413 while (true) {
4414 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4415 V = AddRec->getStart();
4416 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
4417 const SCEV *PtrOp = nullptr;
4418 for (const SCEV *AddOp : Add->operands()) {
4419 if (AddOp->getType()->isPointerTy()) {
4420 assert(!PtrOp && "Cannot have multiple pointer ops");
4421 PtrOp = AddOp;
4422 }
4423 }
4424 assert(PtrOp && "Must have pointer op");
4425 V = PtrOp;
4426 } else // Not something we can look further into.
4427 return V;
4428 }
4429 }
4430
4431 /// Push users of the given Instruction onto the given Worklist.
4432 static void PushDefUseChildren(Instruction *I,
4433 SmallVectorImpl<Instruction *> &Worklist,
4434 SmallPtrSetImpl<Instruction *> &Visited) {
4435 // Push the def-use children onto the Worklist stack.
4436 for (User *U : I->users()) {
4437 auto *UserInsn = cast<Instruction>(U);
4438 if (Visited.insert(UserInsn).second)
4439 Worklist.push_back(UserInsn);
4440 }
4441 }
4442
4443 namespace {
4444
4445 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4446 /// use its start expression. If the sub-expression's loop is not L, then:
4447 /// if IgnoreOtherLoops is true, use the AddRec itself; otherwise the
4448 /// rewrite cannot be done.
4449 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4450 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4451 public:
4452 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4453 bool IgnoreOtherLoops = true) {
4454 SCEVInitRewriter Rewriter(L, SE);
4455 const SCEV *Result = Rewriter.visit(S);
4456 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4457 return SE.getCouldNotCompute();
4458 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4459 ? SE.getCouldNotCompute()
4460 : Result;
4461 }
4462
4463 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4464 if (!SE.isLoopInvariant(Expr, L))
4465 SeenLoopVariantSCEVUnknown = true;
4466 return Expr;
4467 }
4468
4469 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4470 // Only re-write AddRecExprs for this loop.
4471 if (Expr->getLoop() == L) 4472 return Expr->getStart(); 4473 SeenOtherLoops = true; 4474 return Expr; 4475 } 4476 4477 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4478 4479 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4480 4481 private: 4482 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE) 4483 : SCEVRewriteVisitor(SE), L(L) {} 4484 4485 const Loop *L; 4486 bool SeenLoopVariantSCEVUnknown = false; 4487 bool SeenOtherLoops = false; 4488 }; 4489 4490 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its 4491 /// post-increment expression if its loop is L; if its loop is not L, 4492 /// use the AddRec itself. 4493 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done. 4494 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> { 4495 public: 4496 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) { 4497 SCEVPostIncRewriter Rewriter(L, SE); 4498 const SCEV *Result = Rewriter.visit(S); 4499 return Rewriter.hasSeenLoopVariantSCEVUnknown() 4500 ? SE.getCouldNotCompute() 4501 : Result; 4502 } 4503 4504 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4505 if (!SE.isLoopInvariant(Expr, L)) 4506 SeenLoopVariantSCEVUnknown = true; 4507 return Expr; 4508 } 4509 4510 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4511 // Only rewrite AddRecExprs for this loop. 4512 if (Expr->getLoop() == L) 4513 return Expr->getPostIncExpr(SE); 4514 SeenOtherLoops = true; 4515 return Expr; 4516 } 4517 4518 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4519 4520 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4521 4522 private: 4523 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4524 : SCEVRewriteVisitor(SE), L(L) {} 4525 4526 const Loop *L; 4527 bool SeenLoopVariantSCEVUnknown = false; 4528 bool SeenOtherLoops = false; 4529 }; 4530 4531 /// This class evaluates the compare condition by matching it against the 4532 /// condition of the loop latch. If there is a match, we assume a true value 4533 /// for the condition while building SCEV nodes. 4534 class SCEVBackedgeConditionFolder 4535 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4536 public: 4537 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4538 ScalarEvolution &SE) { 4539 bool IsPosBECond = false; 4540 Value *BECond = nullptr; 4541 if (BasicBlock *Latch = L->getLoopLatch()) { 4542 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4543 if (BI && BI->isConditional()) { 4544 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4545 "The two outgoing branches should not target the same header!"); 4546 BECond = BI->getCondition(); 4547 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4548 } else { 4549 return S; 4550 } 4551 } 4552 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4553 return Rewriter.visit(S); 4554 } 4555 4556 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4557 const SCEV *Result = Expr; 4558 bool InvariantF = SE.isLoopInvariant(Expr, L); 4559 4560 if (!InvariantF) { 4561 Instruction *I = cast<Instruction>(Expr->getValue()); 4562 switch (I->getOpcode()) { 4563 case Instruction::Select: { 4564 SelectInst *SI = cast<SelectInst>(I); 4565 Optional<const SCEV *> Res = 4566 compareWithBackedgeCondition(SI->getCondition()); 4567 if (Res.hasValue()) { 4568 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4569 Result = SE.getSCEV(IsOne ?
SI->getTrueValue() : SI->getFalseValue()); 4570 } 4571 break; 4572 } 4573 default: { 4574 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4575 if (Res.hasValue()) 4576 Result = Res.getValue(); 4577 break; 4578 } 4579 } 4580 } 4581 return Result; 4582 } 4583 4584 private: 4585 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4586 bool IsPosBECond, ScalarEvolution &SE) 4587 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4588 IsPositiveBECond(IsPosBECond) {} 4589 4590 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4591 4592 const Loop *L; 4593 /// Loop backedge condition. 4594 Value *BackedgeCond = nullptr; 4595 /// Set to true if the loop backedge is taken on the positive branch condition. 4596 bool IsPositiveBECond; 4597 }; 4598 4599 Optional<const SCEV *> 4600 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4601 4602 // If the value matches the backedge condition of the loop latch, 4603 // then return a constant evolution node based on the loopback 4604 // branch taken. 4605 if (BackedgeCond == IC) 4606 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext())) 4607 : SE.getZero(Type::getInt1Ty(SE.getContext())); 4608 return None; 4609 } 4610 4611 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> { 4612 public: 4613 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4614 ScalarEvolution &SE) { 4615 SCEVShiftRewriter Rewriter(L, SE); 4616 const SCEV *Result = Rewriter.visit(S); 4617 return Rewriter.isValid() ? Result : SE.getCouldNotCompute(); 4618 } 4619 4620 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4621 // Only allow loop-invariant SCEVUnknowns. 4622 if (!SE.isLoopInvariant(Expr, L)) 4623 Valid = false; 4624 return Expr; 4625 } 4626 4627 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { 4628 if (Expr->getLoop() == L && Expr->isAffine()) 4629 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE)); 4630 Valid = false; 4631 return Expr; 4632 } 4633 4634 bool isValid() { return Valid; } 4635 4636 private: 4637 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE) 4638 : SCEVRewriteVisitor(SE), L(L) {} 4639 4640 const Loop *L; 4641 bool Valid = true; 4642 }; 4643 4644 } // end anonymous namespace 4645 4646 SCEV::NoWrapFlags 4647 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) { 4648 if (!AR->isAffine()) 4649 return SCEV::FlagAnyWrap; 4650 4651 using OBO = OverflowingBinaryOperator; 4652 4653 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap; 4654 4655 if (!AR->hasNoSignedWrap()) { 4656 ConstantRange AddRecRange = getSignedRange(AR); 4657 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this)); 4658 4659 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4660 Instruction::Add, IncRange, OBO::NoSignedWrap); 4661 if (NSWRegion.contains(AddRecRange)) 4662 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW); 4663 } 4664 4665 if (!AR->hasNoUnsignedWrap()) { 4666 ConstantRange AddRecRange = getUnsignedRange(AR); 4667 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this)); 4668 4669 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 4670 Instruction::Add, IncRange, OBO::NoUnsignedWrap); 4671 if (NUWRegion.contains(AddRecRange)) 4672 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW); 4673 } 4674 4675 return Result; 4676 } 4677 4678 SCEV::NoWrapFlags 4679 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4680 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4681 4682 if
(AR->hasNoSignedWrap()) 4683 return Result; 4684 4685 if (!AR->isAffine()) 4686 return Result; 4687 4688 const SCEV *Step = AR->getStepRecurrence(*this); 4689 const Loop *L = AR->getLoop(); 4690 4691 // Check whether the backedge-taken count is SCEVCouldNotCompute. 4692 // Note that this serves two purposes: It filters out loops that are 4693 // simply not analyzable, and it covers the case where this code is 4694 // being called from within backedge-taken count analysis, such that 4695 // attempting to ask for the backedge-taken count would likely result 4696 // in infinite recursion. In the latter case, the analysis code will 4697 // cope with a conservative value, and it will take care to purge 4698 // that value once it has finished. 4699 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 4700 4701 // Normally, in the cases we can prove no-overflow via a 4702 // backedge guarding condition, we can also compute a backedge 4703 // taken count for the loop. The exceptions are assumptions and 4704 // guards present in the loop -- SCEV is not great at exploiting 4705 // these to compute max backedge taken counts, but can still use 4706 // these to prove lack of overflow. Use this fact to avoid 4707 // doing extra work that may not pay off. 4708 4709 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4710 AC.assumptions().empty()) 4711 return Result; 4712 4713 // If the backedge is guarded by a comparison with the pre-inc value the 4714 // addrec is safe. Also, if the entry is guarded by a comparison with the 4715 // start value and the backedge is guarded by a comparison with the post-inc 4716 // value, the addrec is safe. 4717 ICmpInst::Predicate Pred; 4718 const SCEV *OverflowLimit = 4719 getSignedOverflowLimitForStep(Step, &Pred, this); 4720 if (OverflowLimit && 4721 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) || 4722 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) { 4723 Result = setFlags(Result, SCEV::FlagNSW); 4724 } 4725 return Result; 4726 } 4727 SCEV::NoWrapFlags 4728 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) { 4729 SCEV::NoWrapFlags Result = AR->getNoWrapFlags(); 4730 4731 if (AR->hasNoUnsignedWrap()) 4732 return Result; 4733 4734 if (!AR->isAffine()) 4735 return Result; 4736 4737 const SCEV *Step = AR->getStepRecurrence(*this); 4738 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 4739 const Loop *L = AR->getLoop(); 4740 4741 // Check whether the backedge-taken count is SCEVCouldNotCompute. 4742 // Note that this serves two purposes: It filters out loops that are 4743 // simply not analyzable, and it covers the case where this code is 4744 // being called from within backedge-taken count analysis, such that 4745 // attempting to ask for the backedge-taken count would likely result 4746 // in infinite recursion. In the latter case, the analysis code will 4747 // cope with a conservative value, and it will take care to purge 4748 // that value once it has finished. 4749 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 4750 4751 // Normally, in the cases we can prove no-overflow via a 4752 // backedge guarding condition, we can also compute a backedge 4753 // taken count for the loop. The exceptions are assumptions and 4754 // guards present in the loop -- SCEV is not great at exploiting 4755 // these to compute max backedge taken counts, but can still use 4756 // these to prove lack of overflow. Use this fact to avoid 4757 // doing extra work that may not pay off.
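// That is: if the max backedge-taken count is unknown and there are no
// guards or assumptions to exploit, the guard-based checks below are
// unlikely to succeed, so return early.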
4758 4759 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards && 4760 AC.assumptions().empty()) 4761 return Result; 4762 4763 // If the backedge is guarded by a comparison with the pre-inc value the 4764 // addrec is safe. Also, if the entry is guarded by a comparison with the 4765 // start value and the backedge is guarded by a comparison with the post-inc 4766 // value, the addrec is safe. 4767 if (isKnownPositive(Step)) { 4768 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 4769 getUnsignedRangeMax(Step)); 4770 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 4771 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) { 4772 Result = setFlags(Result, SCEV::FlagNUW); 4773 } 4774 } 4775 4776 return Result; 4777 } 4778 4779 namespace { 4780 4781 /// Represents an abstract binary operation. This may exist as a 4782 /// normal instruction or constant expression, or may have been 4783 /// derived from an expression tree. 4784 struct BinaryOp { 4785 unsigned Opcode; 4786 Value *LHS; 4787 Value *RHS; 4788 bool IsNSW = false; 4789 bool IsNUW = false; 4790 4791 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or 4792 /// constant expression. 4793 Operator *Op = nullptr; 4794 4795 explicit BinaryOp(Operator *Op) 4796 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)), 4797 Op(Op) { 4798 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) { 4799 IsNSW = OBO->hasNoSignedWrap(); 4800 IsNUW = OBO->hasNoUnsignedWrap(); 4801 } 4802 } 4803 4804 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false, 4805 bool IsNUW = false) 4806 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {} 4807 }; 4808 4809 } // end anonymous namespace 4810 4811 /// Try to map \p V into a BinaryOp, and return \c None on failure. 4812 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) { 4813 auto *Op = dyn_cast<Operator>(V); 4814 if (!Op) 4815 return None; 4816 4817 // Implementation detail: all the cleverness here should happen without 4818 // creating new SCEV expressions -- our caller knows tricks to avoid creating 4819 // SCEV expressions when possible, and we should not break that. 4820 4821 switch (Op->getOpcode()) { 4822 case Instruction::Add: 4823 case Instruction::Sub: 4824 case Instruction::Mul: 4825 case Instruction::UDiv: 4826 case Instruction::URem: 4827 case Instruction::And: 4828 case Instruction::Or: 4829 case Instruction::AShr: 4830 case Instruction::Shl: 4831 return BinaryOp(Op); 4832 4833 case Instruction::Xor: 4834 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1))) 4835 // If the RHS of the xor is a signmask, then this is just an add. 4836 // Instcombine turns add of signmask into xor as a strength reduction step. 4837 if (RHSC->getValue().isSignMask()) 4838 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1)); 4839 return BinaryOp(Op); 4840 4841 case Instruction::LShr: 4842 // Turn logical shift right of a constant into an unsigned divide. 4843 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { 4844 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); 4845 4846 // If the shift count is not less than the bitwidth, the result of 4847 // the shift is undefined. Don't try to analyze it, because the 4848 // resolution chosen here may differ from the resolution chosen in 4849 // other parts of the compiler.
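// For example (illustrative): (lshr i32 %x, 4) is mapped to the
// equivalent (udiv i32 %x, 16), i.e. X = 1 << 4 below.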
4850 if (SA->getValue().ult(BitWidth)) { 4851 Constant *X = 4852 ConstantInt::get(SA->getContext(), 4853 APInt::getOneBitSet(BitWidth, SA->getZExtValue())); 4854 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X); 4855 } 4856 } 4857 return BinaryOp(Op); 4858 4859 case Instruction::ExtractValue: { 4860 auto *EVI = cast<ExtractValueInst>(Op); 4861 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0) 4862 break; 4863 4864 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()); 4865 if (!WO) 4866 break; 4867 4868 Instruction::BinaryOps BinOp = WO->getBinaryOp(); 4869 bool Signed = WO->isSigned(); 4870 // TODO: Should add nuw/nsw flags for mul as well. 4871 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT)) 4872 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS()); 4873 4874 // Now that we know that all uses of the arithmetic-result component of 4875 // WO are guarded by the overflow check, we can go ahead and pretend 4876 // that the arithmetic is non-overflowing. 4877 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4878 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4879 } 4880 4881 default: 4882 break; 4883 } 4884 4885 // Recognise the loop.decrement.reg intrinsic; as it has exactly the same 4886 // semantics as a Sub, return a binary sub expression. 4887 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4888 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4889 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4890 4891 return None; 4892 } 4893 4894 /// Helper function for createAddRecFromPHIWithCasts. We have a phi 4895 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4896 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4897 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4898 /// follows one of the following patterns: 4899 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4900 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4901 /// If the SCEV expression of \p Op conforms to one of the expected patterns 4902 /// we return the type of the truncation operation, and indicate whether the 4903 /// truncated type should be treated as signed/unsigned by setting 4904 /// \p Signed to true/false, respectively. 4905 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4906 bool &Signed, ScalarEvolution &SE) { 4907 // The case where Op == SymbolicPHI (that is, with no type conversions on 4908 // the way) is handled by the regular add recurrence creating logic and 4909 // would have already been triggered in createAddRecFromPHI. Reaching it here 4910 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4911 // because one of the other operands of the SCEVAddExpr updating this PHI is 4912 // not invariant). 4913 // 4914 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4915 // this case predicates that allow us to prove that Op == SymbolicPHI will 4916 // be added. 4917 if (Op == SymbolicPHI) 4918 return nullptr; 4919 4920 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4921 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4922 if (SourceBits != NewBits) 4923 return nullptr; 4924 4925 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4926 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4927 if (!SExt && !ZExt) 4928 return nullptr; 4929 const SCEVTruncateExpr *Trunc = 4930 SExt ?
dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4931 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4932 if (!Trunc) 4933 return nullptr; 4934 const SCEV *X = Trunc->getOperand(); 4935 if (X != SymbolicPHI) 4936 return nullptr; 4937 Signed = SExt != nullptr; 4938 return Trunc->getType(); 4939 } 4940 4941 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4942 if (!PN->getType()->isIntegerTy()) 4943 return nullptr; 4944 const Loop *L = LI.getLoopFor(PN->getParent()); 4945 if (!L || L->getHeader() != PN->getParent()) 4946 return nullptr; 4947 return L; 4948 } 4949 4950 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4951 // computation that updates the phi follows the following pattern: 4952 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4953 // which corresponds to a phi->trunc->sext/zext->add->phi update chain. 4954 // If so, try to see if it can be rewritten as an AddRecExpr under some 4955 // Predicates. If successful, return them as a pair. Also cache the results 4956 // of the analysis. 4957 // 4958 // Example usage scenario: 4959 // Say the Rewriter is called for the following SCEV: 4960 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4961 // where: 4962 // %X = phi i64 (%Start, %BEValue) 4963 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4964 // and call this function with %SymbolicPHI = %X. 4965 // 4966 // The analysis will find that the value coming around the backedge has 4967 // the following SCEV: 4968 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4969 // Upon concluding that this matches the desired pattern, the function 4970 // will return the pair {NewAddRec, SmallPredsVec} where: 4971 // NewAddRec = {%Start,+,%Step} 4972 // SmallPredsVec = {P1, P2, P3} as follows: 4973 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4974 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4975 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4976 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4977 // under the predicates {P1,P2,P3}. 4978 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4979 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}} 4980 // 4981 // TODOs: 4982 // 4983 // 1) Extend the Induction descriptor to also support inductions that involve 4984 // casts: When needed (namely, when we are called in the context of the 4985 // vectorizer induction analysis), a Set of cast instructions will be 4986 // populated by this method, and provided back to isInductionPHI. This is 4987 // needed to allow the vectorizer to properly record them to be ignored by 4988 // the cost model and to avoid vectorizing them (otherwise these casts, 4989 // which are redundant under the runtime overflow checks, will be 4990 // vectorized, which can be costly). 4991 // 4992 // 2) Support additional induction/PHISCEV patterns: We also want to support 4993 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4994 // after the induction update operation (the induction increment): 4995 // 4996 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4997 // which corresponds to a phi->add->trunc->sext/zext->phi update chain. 4998 // 4999 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 5000 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
5001 // 5002 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 5003 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5004 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 5005 SmallVector<const SCEVPredicate *, 3> Predicates; 5006 5007 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 5008 // return an AddRec expression under some predicate. 5009 5010 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 5011 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 5012 assert(L && "Expecting an integer loop header phi"); 5013 5014 // The loop may have multiple entrances or multiple exits; we can analyze 5015 // this phi as an addrec if it has a unique entry value and a unique 5016 // backedge value. 5017 Value *BEValueV = nullptr, *StartValueV = nullptr; 5018 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5019 Value *V = PN->getIncomingValue(i); 5020 if (L->contains(PN->getIncomingBlock(i))) { 5021 if (!BEValueV) { 5022 BEValueV = V; 5023 } else if (BEValueV != V) { 5024 BEValueV = nullptr; 5025 break; 5026 } 5027 } else if (!StartValueV) { 5028 StartValueV = V; 5029 } else if (StartValueV != V) { 5030 StartValueV = nullptr; 5031 break; 5032 } 5033 } 5034 if (!BEValueV || !StartValueV) 5035 return None; 5036 5037 const SCEV *BEValue = getSCEV(BEValueV); 5038 5039 // If the value coming around the backedge is an add with the symbolic 5040 // value we just inserted, possibly with casts that we can ignore under 5041 // an appropriate runtime guard, then we found a simple induction variable! 5042 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 5043 if (!Add) 5044 return None; 5045 5046 // If there is a single occurrence of the symbolic value, possibly 5047 // casted, replace it with a recurrence. 5048 unsigned FoundIndex = Add->getNumOperands(); 5049 Type *TruncTy = nullptr; 5050 bool Signed; 5051 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5052 if ((TruncTy = 5053 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 5054 if (FoundIndex == e) { 5055 FoundIndex = i; 5056 break; 5057 } 5058 5059 if (FoundIndex == Add->getNumOperands()) 5060 return None; 5061 5062 // Create an add with everything but the specified operand. 5063 SmallVector<const SCEV *, 8> Ops; 5064 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5065 if (i != FoundIndex) 5066 Ops.push_back(Add->getOperand(i)); 5067 const SCEV *Accum = getAddExpr(Ops); 5068 5069 // The runtime checks will not be valid if the step amount is 5070 // varying inside the loop. 5071 if (!isLoopInvariant(Accum, L)) 5072 return None; 5073 5074 // *** Part2: Create the predicates 5075 5076 // Analysis was successful: we have a phi-with-cast pattern for which we 5077 // can return an AddRec expression under the following predicates: 5078 // 5079 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 5080 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
5081 // P2: An Equal predicate that guarantees that 5082 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 5083 // P3: An Equal predicate that guarantees that 5084 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 5085 // 5086 // As we next prove, the above predicates guarantee that: 5087 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 5088 // 5089 // 5090 // More formally, we want to prove that: 5091 // Expr(i+1) = Start + (i+1) * Accum 5092 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5093 // 5094 // Given that: 5095 // 1) Expr(0) = Start 5096 // 2) Expr(1) = Start + Accum 5097 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 5098 // 3) Induction hypothesis (step i): 5099 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 5100 // 5101 // Proof: 5102 // Expr(i+1) = 5103 // = Start + (i+1)*Accum 5104 // = (Start + i*Accum) + Accum 5105 // = Expr(i) + Accum 5106 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 5107 // :: from step i 5108 // 5109 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 5110 // 5111 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 5112 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 5113 // + Accum :: from P3 5114 // 5115 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 5116 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 5117 // 5118 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 5119 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 5120 // 5121 // By induction, the same applies to all iterations 1<=i<n: 5122 // 5123 5124 // Create a truncated addrec for which we will add a no overflow check (P1). 5125 const SCEV *StartVal = getSCEV(StartValueV); 5126 const SCEV *PHISCEV = 5127 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 5128 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 5129 5130 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 5131 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 5132 // will be constant. 5133 // 5134 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 5135 // add P1. 5136 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5137 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5138 Signed ? SCEVWrapPredicate::IncrementNSSW 5139 : SCEVWrapPredicate::IncrementNUSW; 5140 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5141 Predicates.push_back(AddRecPred); 5142 } 5143 5144 // Create the Equal Predicates P2,P3: 5145 5146 // It is possible that the predicates P2 and/or P3 are computable at 5147 // compile time due to StartVal and/or Accum being constants. 5148 // If either one is, then we can check that now and escape if either P2 5149 // or P3 is false. 5150 5151 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 5152 // for each of StartVal and Accum 5153 auto getExtendedExpr = [&](const SCEV *Expr, 5154 bool CreateSignExtend) -> const SCEV * { 5155 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 5156 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 5157 const SCEV *ExtendedExpr = 5158 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType()) 5159 : getZeroExtendExpr(TruncatedExpr, Expr->getType()); 5160 return ExtendedExpr; 5161 }; 5162 5163 // Given: 5164 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy) 5165 // = getExtendedExpr(Expr) 5166 // Determine whether the predicate P: Expr == ExtendedExpr 5167 // is known to be false at compile time. 5168 auto PredIsKnownFalse = [&](const SCEV *Expr, 5169 const SCEV *ExtendedExpr) -> bool { 5170 return Expr != ExtendedExpr && 5171 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr); 5172 }; 5173 5174 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed); 5175 if (PredIsKnownFalse(StartVal, StartExtended)) { 5176 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";); 5177 return None; 5178 } 5179 5180 // The Step is always Signed (because the overflow checks are either 5181 // NSSW or NUSW). 5182 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true); 5183 if (PredIsKnownFalse(Accum, AccumExtended)) { 5184 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";); 5185 return None; 5186 } 5187 5188 auto AppendPredicate = [&](const SCEV *Expr, 5189 const SCEV *ExtendedExpr) -> void { 5190 if (Expr != ExtendedExpr && 5191 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) { 5192 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr); 5193 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred); 5194 Predicates.push_back(Pred); 5195 } 5196 }; 5197 5198 AppendPredicate(StartVal, StartExtended); 5199 AppendPredicate(Accum, AccumExtended); 5200 5201 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in 5202 // which the casts had been folded away. The caller can rewrite SymbolicPHI 5203 // into NewAR if it will also add the runtime overflow checks specified in 5204 // Predicates. 5205 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap); 5206 5207 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite = 5208 std::make_pair(NewAR, Predicates); 5209 // Remember the result of the analysis for this SCEV at this location. 5210 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite; 5211 return PredRewrite; 5212 } 5213 5214 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5215 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) { 5216 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 5217 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 5218 if (!L) 5219 return None; 5220 5221 // Check to see if we already analyzed this PHI.
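// A cached pair whose first element is SymbolicPHI itself encodes an
// earlier failed analysis; a cached AddRec together with a non-empty
// predicate set encodes an earlier success (see below).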
5222 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L}); 5223 if (I != PredicatedSCEVRewrites.end()) { 5224 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite = 5225 I->second; 5226 // Analysis was done before and failed to create an AddRec: 5227 if (Rewrite.first == SymbolicPHI) 5228 return None; 5229 // Analysis was done before and succeeded in creating an AddRec under 5230 // a predicate: 5231 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec"); 5232 assert(!(Rewrite.second).empty() && "Expected to find Predicates"); 5233 return Rewrite; 5234 } 5235 5236 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 5237 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI); 5238 5239 // Record in the cache that the analysis failed. 5240 if (!Rewrite) { 5241 SmallVector<const SCEVPredicate *, 3> Predicates; 5242 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates}; 5243 return None; 5244 } 5245 5246 return Rewrite; 5247 } 5248 5249 // FIXME: This utility is currently required because the Rewriter currently 5250 // does not rewrite this expression: 5251 // {0, +, (sext ix (trunc iy to ix) to iy)} 5252 // into {0, +, %step}, 5253 // even when the following Equal predicate exists: 5254 // "%step == (sext ix (trunc iy to ix) to iy)". 5255 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds( 5256 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const { 5257 if (AR1 == AR2) 5258 return true; 5259 5260 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool { 5261 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) && 5262 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1))) 5263 return false; 5264 return true; 5265 }; 5266 5267 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) || 5268 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE))) 5269 return false; 5270 return true; 5271 } 5272 5273 /// A helper function for createAddRecFromPHI to handle simple cases. 5274 /// 5275 /// This function tries to find an AddRec expression for the simplest (yet most 5276 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)). 5277 /// If it fails, createAddRecFromPHI will use a more general, but slow, 5278 /// technique for finding the AddRec expression. 5279 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN, 5280 Value *BEValueV, 5281 Value *StartValueV) { 5282 const Loop *L = LI.getLoopFor(PN->getParent()); 5283 assert(L && L->getHeader() == PN->getParent()); 5284 assert(BEValueV && StartValueV); 5285 5286 auto BO = MatchBinaryOp(BEValueV, DT); 5287 if (!BO) 5288 return nullptr; 5289 5290 if (BO->Opcode != Instruction::Add) 5291 return nullptr; 5292 5293 const SCEV *Accum = nullptr; 5294 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS)) 5295 Accum = getSCEV(BO->RHS); 5296 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS)) 5297 Accum = getSCEV(BO->LHS); 5298 5299 if (!Accum) 5300 return nullptr; 5301 5302 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5303 if (BO->IsNUW) 5304 Flags = setFlags(Flags, SCEV::FlagNUW); 5305 if (BO->IsNSW) 5306 Flags = setFlags(Flags, SCEV::FlagNSW); 5307 5308 const SCEV *StartVal = getSCEV(StartValueV); 5309 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5310 insertValueToMap(PN, PHISCEV); 5311 5312 // We can add Flags to the post-inc expression only if we 5313 // know that it is *undefined behavior* for BEValueV to 5314 // overflow.
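// For example (illustrative): if BEValueV is 'add nsw i32 %iv, 1' and its
// poison value would be guaranteed to reach a side-effecting use on every
// iteration, signed overflow is effectively undefined behavior, so the
// post-inc addrec {(%start + 1),+,1} may carry FlagNSW as well.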
5315 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) { 5316 assert(isLoopInvariant(Accum, L) && 5317 "Accum is defined outside L, but is not invariant?"); 5318 if (isAddRecNeverPoison(BEInst, L)) 5319 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5320 } 5321 5322 return PHISCEV; 5323 } 5324 5325 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) { 5326 const Loop *L = LI.getLoopFor(PN->getParent()); 5327 if (!L || L->getHeader() != PN->getParent()) 5328 return nullptr; 5329 5330 // The loop may have multiple entrances or multiple exits; we can analyze 5331 // this phi as an addrec if it has a unique entry value and a unique 5332 // backedge value. 5333 Value *BEValueV = nullptr, *StartValueV = nullptr; 5334 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 5335 Value *V = PN->getIncomingValue(i); 5336 if (L->contains(PN->getIncomingBlock(i))) { 5337 if (!BEValueV) { 5338 BEValueV = V; 5339 } else if (BEValueV != V) { 5340 BEValueV = nullptr; 5341 break; 5342 } 5343 } else if (!StartValueV) { 5344 StartValueV = V; 5345 } else if (StartValueV != V) { 5346 StartValueV = nullptr; 5347 break; 5348 } 5349 } 5350 if (!BEValueV || !StartValueV) 5351 return nullptr; 5352 5353 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() && 5354 "PHI node already processed?"); 5355 5356 // First, try to find AddRec expression without creating a fictitious symbolic 5357 // value for PN. 5358 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV)) 5359 return S; 5360 5361 // Handle PHI node value symbolically. 5362 const SCEV *SymbolicName = getUnknown(PN); 5363 insertValueToMap(PN, SymbolicName); 5364 5365 // Using this symbolic name for the PHI, analyze the value coming around 5366 // the back-edge. 5367 const SCEV *BEValue = getSCEV(BEValueV); 5368 5369 // NOTE: If BEValue is loop invariant, we know that the PHI node just 5370 // has a special value for the first iteration of the loop. 5371 5372 // If the value coming around the backedge is an add with the symbolic 5373 // value we just inserted, then we found a simple induction variable! 5374 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 5375 // If there is a single occurrence of the symbolic value, replace it 5376 // with a recurrence. 5377 unsigned FoundIndex = Add->getNumOperands(); 5378 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5379 if (Add->getOperand(i) == SymbolicName) 5380 if (FoundIndex == e) { 5381 FoundIndex = i; 5382 break; 5383 } 5384 5385 if (FoundIndex != Add->getNumOperands()) { 5386 // Create an add with everything but the specified operand. 5387 SmallVector<const SCEV *, 8> Ops; 5388 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 5389 if (i != FoundIndex) 5390 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i), 5391 L, *this)); 5392 const SCEV *Accum = getAddExpr(Ops); 5393 5394 // This is not a valid addrec if the step amount is varying each 5395 // loop iteration, but is not itself an addrec in this loop.
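// e.g. (illustrative) %next = add i32 %phi, %x, where %x takes a new,
// non-addrec value on every iteration of L, cannot be expressed as an
// addrec in L and is rejected by the check below.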
5396 if (isLoopInvariant(Accum, L) || 5397 (isa<SCEVAddRecExpr>(Accum) && 5398 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 5399 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 5400 5401 if (auto BO = MatchBinaryOp(BEValueV, DT)) { 5402 if (BO->Opcode == Instruction::Add && BO->LHS == PN) { 5403 if (BO->IsNUW) 5404 Flags = setFlags(Flags, SCEV::FlagNUW); 5405 if (BO->IsNSW) 5406 Flags = setFlags(Flags, SCEV::FlagNSW); 5407 } 5408 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { 5409 // If the increment is an inbounds GEP, then we know the address 5410 // space cannot be wrapped around. We cannot make any guarantee 5411 // about signed or unsigned overflow because pointers are 5412 // unsigned but we may have a negative index from the base 5413 // pointer. We can guarantee that no unsigned wrap occurs if the 5414 // indices form a positive value. 5415 if (GEP->isInBounds() && GEP->getOperand(0) == PN) { 5416 Flags = setFlags(Flags, SCEV::FlagNW); 5417 5418 const SCEV *Ptr = getSCEV(GEP->getPointerOperand()); 5419 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr))) 5420 Flags = setFlags(Flags, SCEV::FlagNUW); 5421 } 5422 5423 // We cannot transfer nuw and nsw flags from subtraction 5424 // operations -- sub nuw X, Y is not the same as add nuw X, -Y 5425 // for instance. 5426 } 5427 5428 const SCEV *StartVal = getSCEV(StartValueV); 5429 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); 5430 5431 // Okay, for the entire analysis of this edge we assumed the PHI 5432 // to be symbolic. We now need to go back and purge all of the 5433 // entries for the scalars that use the symbolic expression. 5434 forgetMemoizedResults(SymbolicName); 5435 insertValueToMap(PN, PHISCEV); 5436 5437 // We can add Flags to the post-inc expression only if we 5438 // know that it is *undefined behavior* for BEValueV to 5439 // overflow. 5440 if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) 5441 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L)) 5442 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags); 5443 5444 return PHISCEV; 5445 } 5446 } 5447 } else { 5448 // Otherwise, this could be a loop like this: 5449 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 5450 // In this case, j = {1,+,1} and BEValue is j. 5451 // Because the other in-value of i (0) fits the evolution of BEValue, 5452 // i really is an addrec evolution. 5453 // 5454 // We can generalize this by saying that i is the shifted value of BEValue 5455 // by one iteration: 5456 // PHI(f(0), f({1,+,1})) --> f({0,+,1}) 5457 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this); 5458 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false); 5459 if (Shifted != getCouldNotCompute() && 5460 Start != getCouldNotCompute()) { 5461 const SCEV *StartVal = getSCEV(StartValueV); 5462 if (Start == StartVal) { 5463 // Okay, for the entire analysis of this edge we assumed the PHI 5464 // to be symbolic. We now need to go back and purge all of the 5465 // entries for the scalars that use the symbolic expression. 5466 forgetMemoizedResults(SymbolicName); 5467 insertValueToMap(PN, Shifted); 5468 return Shifted; 5469 } 5470 } 5471 } 5472 5473 // Remove the temporary PHI node SCEV that has been inserted while intending 5474 // to create an AddRecExpr for this PHI node. We cannot keep this temporary 5475 // as it would prevent later (possibly simpler) SCEV expressions from being 5476 // added to the ValueExprMap.
5477 eraseValueFromMap(PN); 5478 5479 return nullptr; 5480 } 5481 5482 // Checks if the SCEV S is available at BB. S is considered available at BB 5483 // if S can be materialized at BB without introducing a fault. 5484 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S, 5485 BasicBlock *BB) { 5486 struct CheckAvailable { 5487 bool TraversalDone = false; 5488 bool Available = true; 5489 5490 const Loop *L = nullptr; // The loop BB is in (can be nullptr) 5491 BasicBlock *BB = nullptr; 5492 DominatorTree &DT; 5493 5494 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT) 5495 : L(L), BB(BB), DT(DT) {} 5496 5497 bool setUnavailable() { 5498 TraversalDone = true; 5499 Available = false; 5500 return false; 5501 } 5502 5503 bool follow(const SCEV *S) { 5504 switch (S->getSCEVType()) { 5505 case scConstant: 5506 case scPtrToInt: 5507 case scTruncate: 5508 case scZeroExtend: 5509 case scSignExtend: 5510 case scAddExpr: 5511 case scMulExpr: 5512 case scUMaxExpr: 5513 case scSMaxExpr: 5514 case scUMinExpr: 5515 case scSMinExpr: 5516 // These expressions are available if their operand(s) is/are. 5517 return true; 5518 5519 case scAddRecExpr: { 5520 // We allow add recurrences that are in the loop BB is in, or in some 5521 // outer loop. This guarantees availability because the value of the 5522 // add recurrence at BB is simply the "current" value of the induction 5523 // variable. We can relax this in the future; for instance an add 5524 // recurrence on a sibling dominating loop is also available at BB. 5525 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop(); 5526 if (L && (ARLoop == L || ARLoop->contains(L))) 5527 return true; 5528 5529 return setUnavailable(); 5530 } 5531 5532 case scUnknown: { 5533 // For SCEVUnknown, we check for simple dominance. 5534 const auto *SU = cast<SCEVUnknown>(S); 5535 Value *V = SU->getValue(); 5536 5537 if (isa<Argument>(V)) 5538 return false; 5539 5540 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB)) 5541 return false; 5542 5543 return setUnavailable(); 5544 } 5545 5546 case scUDivExpr: 5547 case scCouldNotCompute: 5548 // We do not try to be smart about these at all. 5549 return setUnavailable(); 5550 } 5551 llvm_unreachable("Unknown SCEV kind!"); 5552 } 5553 5554 bool isDone() { return TraversalDone; } 5555 }; 5556 5557 CheckAvailable CA(L, BB, DT); 5558 SCEVTraversal<CheckAvailable> ST(CA); 5559 5560 ST.visitAll(S); 5561 return CA.Available; 5562 } 5563 5564 // Try to match a control flow sequence that branches out at BI and merges back 5565 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful 5566 // match.
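// The two dominance checks below accept either operand order of the merge
// phi, e.g. phi [ %x, %left ], [ %y, %right ] as well as the swapped form.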
5567 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5568 Value *&C, Value *&LHS, Value *&RHS) { 5569 C = BI->getCondition(); 5570 5571 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5572 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5573 5574 if (!LeftEdge.isSingleEdge()) 5575 return false; 5576 5577 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5578 5579 Use &LeftUse = Merge->getOperandUse(0); 5580 Use &RightUse = Merge->getOperandUse(1); 5581 5582 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5583 LHS = LeftUse; 5584 RHS = RightUse; 5585 return true; 5586 } 5587 5588 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5589 LHS = RightUse; 5590 RHS = LeftUse; 5591 return true; 5592 } 5593 5594 return false; 5595 } 5596 5597 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5598 auto IsReachable = 5599 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5600 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5601 const Loop *L = LI.getLoopFor(PN->getParent()); 5602 5603 // We don't want to break LCSSA, even in a SCEV expression tree. 5604 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5605 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5606 return nullptr; 5607 5608 // Try to match 5609 // 5610 // br %cond, label %left, label %right 5611 // left: 5612 // br label %merge 5613 // right: 5614 // br label %merge 5615 // merge: 5616 // V = phi [ %x, %left ], [ %y, %right ] 5617 // 5618 // as "select %cond, %x, %y" 5619 5620 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5621 assert(IDom && "At least the entry block should dominate PN"); 5622 5623 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5624 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5625 5626 if (BI && BI->isConditional() && 5627 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5628 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5629 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5630 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5631 } 5632 5633 return nullptr; 5634 } 5635 5636 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5637 if (const SCEV *S = createAddRecFromPHI(PN)) 5638 return S; 5639 5640 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5641 return S; 5642 5643 // If the PHI has a single incoming value, follow that value, unless the 5644 // PHI's incoming blocks are in a different loop, in which case doing so 5645 // risks breaking LCSSA form. Instcombine would normally zap these, but 5646 // it doesn't have DominatorTree information, so it may miss cases. 5647 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5648 if (LI.replacementPreservesLCSSAForm(PN, V)) 5649 return getSCEV(V); 5650 5651 // If it's not a loop phi, we can't handle it yet. 5652 return getUnknown(PN); 5653 } 5654 5655 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5656 Value *Cond, 5657 Value *TrueVal, 5658 Value *FalseVal) { 5659 // Handle "constant" branch or select. This can occur for instance when a 5660 // loop pass transforms an inner loop and moves on to process the outer loop. 5661 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5662 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5663 5664 // Try to match some simple smax or umax patterns. 
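// For example (illustrative):
//   select (icmp sgt %a, %b), %a, %b  -->  smax(%a, %b)
//   select (icmp ult %a, %b), %a, %b  -->  umin(%a, %b)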
5665 auto *ICI = dyn_cast<ICmpInst>(Cond); 5666 if (!ICI) 5667 return getUnknown(I); 5668 5669 Value *LHS = ICI->getOperand(0); 5670 Value *RHS = ICI->getOperand(1); 5671 5672 switch (ICI->getPredicate()) { 5673 case ICmpInst::ICMP_SLT: 5674 case ICmpInst::ICMP_SLE: 5675 case ICmpInst::ICMP_ULT: 5676 case ICmpInst::ICMP_ULE: 5677 std::swap(LHS, RHS); 5678 LLVM_FALLTHROUGH; 5679 case ICmpInst::ICMP_SGT: 5680 case ICmpInst::ICMP_SGE: 5681 case ICmpInst::ICMP_UGT: 5682 case ICmpInst::ICMP_UGE: 5683 // a > b ? a+x : b+x -> max(a, b)+x 5684 // a > b ? b+x : a+x -> min(a, b)+x 5685 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5686 bool Signed = ICI->isSigned(); 5687 const SCEV *LA = getSCEV(TrueVal); 5688 const SCEV *RA = getSCEV(FalseVal); 5689 const SCEV *LS = getSCEV(LHS); 5690 const SCEV *RS = getSCEV(RHS); 5691 if (LA->getType()->isPointerTy()) { 5692 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. 5693 // Need to make sure we can't produce weird expressions involving 5694 // negated pointers. 5695 if (LA == LS && RA == RS) 5696 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); 5697 if (LA == RS && RA == LS) 5698 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); 5699 } 5700 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { 5701 if (Op->getType()->isPointerTy()) { 5702 Op = getLosslessPtrToIntExpr(Op); 5703 if (isa<SCEVCouldNotCompute>(Op)) 5704 return Op; 5705 } 5706 if (Signed) 5707 Op = getNoopOrSignExtend(Op, I->getType()); 5708 else 5709 Op = getNoopOrZeroExtend(Op, I->getType()); 5710 return Op; 5711 }; 5712 LS = CoerceOperand(LS); 5713 RS = CoerceOperand(RS); 5714 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) 5715 break; 5716 const SCEV *LDiff = getMinusSCEV(LA, LS); 5717 const SCEV *RDiff = getMinusSCEV(RA, RS); 5718 if (LDiff == RDiff) 5719 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 5720 LDiff); 5721 LDiff = getMinusSCEV(LA, RS); 5722 RDiff = getMinusSCEV(RA, LS); 5723 if (LDiff == RDiff) 5724 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 5725 LDiff); 5726 } 5727 break; 5728 case ICmpInst::ICMP_NE: 5729 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5730 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5731 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5732 const SCEV *One = getOne(I->getType()); 5733 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5734 const SCEV *LA = getSCEV(TrueVal); 5735 const SCEV *RA = getSCEV(FalseVal); 5736 const SCEV *LDiff = getMinusSCEV(LA, LS); 5737 const SCEV *RDiff = getMinusSCEV(RA, One); 5738 if (LDiff == RDiff) 5739 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5740 } 5741 break; 5742 case ICmpInst::ICMP_EQ: 5743 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5744 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5745 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5746 const SCEV *One = getOne(I->getType()); 5747 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5748 const SCEV *LA = getSCEV(TrueVal); 5749 const SCEV *RA = getSCEV(FalseVal); 5750 const SCEV *LDiff = getMinusSCEV(LA, One); 5751 const SCEV *RDiff = getMinusSCEV(RA, LS); 5752 if (LDiff == RDiff) 5753 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5754 } 5755 break; 5756 default: 5757 break; 5758 } 5759 5760 return getUnknown(I); 5761 } 5762 5763 /// Expand GEP instructions into add and multiply operations. 
This allows them 5764 /// to be analyzed by regular SCEV code. 5765 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5766 // Don't attempt to analyze GEPs over unsized objects. 5767 if (!GEP->getSourceElementType()->isSized()) 5768 return getUnknown(GEP); 5769 5770 SmallVector<const SCEV *, 4> IndexExprs; 5771 for (Value *Index : GEP->indices()) 5772 IndexExprs.push_back(getSCEV(Index)); 5773 return getGEPExpr(GEP, IndexExprs); 5774 } 5775 5776 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5777 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5778 return C->getAPInt().countTrailingZeros(); 5779 5780 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 5781 return GetMinTrailingZeros(I->getOperand()); 5782 5783 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5784 return std::min(GetMinTrailingZeros(T->getOperand()), 5785 (uint32_t)getTypeSizeInBits(T->getType())); 5786 5787 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5788 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5789 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5790 ? getTypeSizeInBits(E->getType()) 5791 : OpRes; 5792 } 5793 5794 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5795 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5796 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5797 ? getTypeSizeInBits(E->getType()) 5798 : OpRes; 5799 } 5800 5801 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5802 // The result is the min of all operands' results. 5803 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5804 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5805 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5806 return MinOpRes; 5807 } 5808 5809 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5810 // The result is the sum of all operands' results. 5811 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5812 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5813 for (unsigned i = 1, e = M->getNumOperands(); 5814 SumOpRes != BitWidth && i != e; ++i) 5815 SumOpRes = 5816 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5817 return SumOpRes; 5818 } 5819 5820 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5821 // The result is the min of all operands' results. 5822 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5823 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5824 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5825 return MinOpRes; 5826 } 5827 5828 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5829 // The result is the min of all operands' results. 5830 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5831 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5832 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5833 return MinOpRes; 5834 } 5835 5836 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5837 // The result is the min of all operands' results. 5838 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5839 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5840 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5841 return MinOpRes; 5842 } 5843 5844 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5845 // For a SCEVUnknown, ask ValueTracking.
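// For example (illustrative): if the unknown value is 'shl i32 %x, 3',
// known bits guarantee at least three trailing zeros.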
5846 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 5847 return Known.countMinTrailingZeros(); 5848 } 5849 5850 // SCEVUDivExpr 5851 return 0; 5852 } 5853 5854 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 5855 auto I = MinTrailingZerosCache.find(S); 5856 if (I != MinTrailingZerosCache.end()) 5857 return I->second; 5858 5859 uint32_t Result = GetMinTrailingZerosImpl(S); 5860 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 5861 assert(InsertPair.second && "Should insert a new key"); 5862 return InsertPair.first->second; 5863 } 5864 5865 /// Helper method to assign a range to V from metadata present in the IR. 5866 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 5867 if (Instruction *I = dyn_cast<Instruction>(V)) 5868 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 5869 return getConstantRangeFromMetadata(*MD); 5870 5871 return None; 5872 } 5873 5874 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, 5875 SCEV::NoWrapFlags Flags) { 5876 if (AddRec->getNoWrapFlags(Flags) != Flags) { 5877 AddRec->setNoWrapFlags(Flags); 5878 UnsignedRanges.erase(AddRec); 5879 SignedRanges.erase(AddRec); 5880 } 5881 } 5882 5883 ConstantRange ScalarEvolution:: 5884 getRangeForUnknownRecurrence(const SCEVUnknown *U) { 5885 const DataLayout &DL = getDataLayout(); 5886 5887 unsigned BitWidth = getTypeSizeInBits(U->getType()); 5888 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); 5889 5890 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then 5891 // use information about the trip count to improve our available range. Note 5892 // that the trip count independent cases are already handled by known bits. 5893 // WARNING: The definition of recurrence used here is subtly different from 5894 // the one used by AddRec (and thus most of this file). Step is allowed to 5895 // be arbitrarily loop-varying here, whereas AddRec allows only loop-invariant 5896 // steps and other addrecs in the same loop (for non-affine addrecs). The code 5897 // below intentionally handles the case where step is not loop invariant. 5898 auto *P = dyn_cast<PHINode>(U->getValue()); 5899 if (!P) 5900 return FullSet; 5901 5902 // Make sure that no Phi input comes from an unreachable block. Otherwise, 5903 // even the values that are not available in these blocks may come from them, 5904 // and this leads to a false-positive recurrence test. 5905 for (auto *Pred : predecessors(P->getParent())) 5906 if (!DT.isReachableFromEntry(Pred)) 5907 return FullSet; 5908 5909 BinaryOperator *BO; 5910 Value *Start, *Step; 5911 if (!matchSimpleRecurrence(P, BO, Start, Step)) 5912 return FullSet; 5913 5914 // If we found a recurrence in reachable code, we must be in a loop. Note 5915 // that BO might be in some subloop of L, and that's completely okay. 5916 auto *L = LI.getLoopFor(P->getParent()); 5917 assert(L && L->getHeader() == P->getParent()); 5918 if (!L->contains(BO->getParent())) 5919 // NOTE: This bailout should be an assert instead. However, asserting 5920 // the condition here exposes a case where LoopFusion is querying SCEV 5921 // with malformed loop information in the midst of the transform. 5922 // There doesn't appear to be an obvious fix, so for the moment bail 5923 // out until the caller issue can be fixed. PR49566 tracks the bug.
5924 return FullSet; 5925 5926 // TODO: Extend to other opcodes such as mul and div 5927 switch (BO->getOpcode()) { 5928 default: 5929 return FullSet; 5930 case Instruction::AShr: 5931 case Instruction::LShr: 5932 case Instruction::Shl: 5933 break; 5934 } 5935 5936 if (BO->getOperand(0) != P) 5937 // TODO: Handle the power function forms some day. 5938 return FullSet; 5939 5940 unsigned TC = getSmallConstantMaxTripCount(L); 5941 if (!TC || TC >= BitWidth) 5942 return FullSet; 5943 5944 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 5945 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 5946 assert(KnownStart.getBitWidth() == BitWidth && 5947 KnownStep.getBitWidth() == BitWidth); 5948 5949 // Compute total shift amount, being careful of overflow and bitwidths. 5950 auto MaxShiftAmt = KnownStep.getMaxValue(); 5951 APInt TCAP(BitWidth, TC-1); 5952 bool Overflow = false; 5953 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 5954 if (Overflow) 5955 return FullSet; 5956 5957 switch (BO->getOpcode()) { 5958 default: 5959 llvm_unreachable("filtered out above"); 5960 case Instruction::AShr: { 5961 // For each ashr, three cases: 5962 // shift = 0 => unchanged value 5963 // saturation => 0 or -1 5964 // other => a value closer to zero (of the same sign) 5965 // Thus, the end value is closer to zero than the start. 5966 auto KnownEnd = KnownBits::ashr(KnownStart, 5967 KnownBits::makeConstant(TotalShift)); 5968 if (KnownStart.isNonNegative()) 5969 // Analogous to lshr (simply not yet canonicalized) 5970 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5971 KnownStart.getMaxValue() + 1); 5972 if (KnownStart.isNegative()) 5973 // End >=u Start && End <=s Start 5974 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 5975 KnownEnd.getMaxValue() + 1); 5976 break; 5977 } 5978 case Instruction::LShr: { 5979 // For each lshr, three cases: 5980 // shift = 0 => unchanged value 5981 // saturation => 0 5982 // other => a smaller positive number 5983 // Thus, the low end of the unsigned range is the last value produced. 5984 auto KnownEnd = KnownBits::lshr(KnownStart, 5985 KnownBits::makeConstant(TotalShift)); 5986 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5987 KnownStart.getMaxValue() + 1); 5988 } 5989 case Instruction::Shl: { 5990 // Iff no bits are shifted out, the value increases on every shift. 5991 auto KnownEnd = KnownBits::shl(KnownStart, 5992 KnownBits::makeConstant(TotalShift)); 5993 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 5994 return ConstantRange(KnownStart.getMinValue(), 5995 KnownEnd.getMaxValue() + 1); 5996 break; 5997 } 5998 } 5999 return FullSet; 6000 } 6001 6002 /// Determine the range for a particular SCEV. If SignHint is 6003 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 6004 /// with a "cleaner" unsigned (resp. signed) representation. 6005 const ConstantRange & 6006 ScalarEvolution::getRangeRef(const SCEV *S, 6007 ScalarEvolution::RangeSignHint SignHint) { 6008 DenseMap<const SCEV *, ConstantRange> &Cache = 6009 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 6010 : SignedRanges; 6011 ConstantRange::PreferredRangeType RangeType = 6012 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 6013 ? ConstantRange::Unsigned : ConstantRange::Signed; 6014 6015 // See if we've computed this range already.
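// Note that ranges are cached per sign hint: a SCEV may have both an
// unsigned-preferred and a signed-preferred range.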
6016 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 6017 if (I != Cache.end()) 6018 return I->second; 6019 6020 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 6021 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 6022 6023 unsigned BitWidth = getTypeSizeInBits(S->getType()); 6024 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 6025 using OBO = OverflowingBinaryOperator; 6026 6027 // If the value has known zeros, the maximum value will have those known zeros 6028 // as well. 6029 uint32_t TZ = GetMinTrailingZeros(S); 6030 if (TZ != 0) { 6031 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 6032 ConservativeResult = 6033 ConstantRange(APInt::getMinValue(BitWidth), 6034 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 6035 else 6036 ConservativeResult = ConstantRange( 6037 APInt::getSignedMinValue(BitWidth), 6038 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 6039 } 6040 6041 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 6042 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 6043 unsigned WrapType = OBO::AnyWrap; 6044 if (Add->hasNoSignedWrap()) 6045 WrapType |= OBO::NoSignedWrap; 6046 if (Add->hasNoUnsignedWrap()) 6047 WrapType |= OBO::NoUnsignedWrap; 6048 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 6049 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 6050 WrapType, RangeType); 6051 return setRange(Add, SignHint, 6052 ConservativeResult.intersectWith(X, RangeType)); 6053 } 6054 6055 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 6056 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 6057 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 6058 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 6059 return setRange(Mul, SignHint, 6060 ConservativeResult.intersectWith(X, RangeType)); 6061 } 6062 6063 if (isa<SCEVMinMaxExpr>(S)) { 6064 Intrinsic::ID ID; 6065 switch (S->getSCEVType()) { 6066 case scUMaxExpr: 6067 ID = Intrinsic::umax; 6068 break; 6069 case scSMaxExpr: 6070 ID = Intrinsic::smax; 6071 break; 6072 case scUMinExpr: 6073 ID = Intrinsic::umin; 6074 break; 6075 case scSMinExpr: 6076 ID = Intrinsic::smin; 6077 break; 6078 default: 6079 llvm_unreachable("Unknown SCEVMinMaxExpr."); 6080 } 6081 6082 const auto *NAry = cast<SCEVNAryExpr>(S); 6083 ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint); 6084 for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i) 6085 X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)}); 6086 return setRange(S, SignHint, 6087 ConservativeResult.intersectWith(X, RangeType)); 6088 } 6089 6090 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 6091 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 6092 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 6093 return setRange(UDiv, SignHint, 6094 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 6095 } 6096 6097 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 6098 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 6099 return setRange(ZExt, SignHint, 6100 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 6101 RangeType)); 6102 } 6103 6104 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 6105 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 6106 return setRange(SExt, SignHint, 6107 ConservativeResult.intersectWith(X.signExtend(BitWidth), 6108 RangeType)); 6109 } 6110 6111 if (const SCEVPtrToIntExpr *PtrToInt = 
dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isZero())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands except the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }

      // Now try the symbolic BE count and more powerful methods.
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(SymbolicMaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {

    // Check if the IR explicitly contains !range metadata.
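    // For example (illustrative, not from the original source): a load
    // annotated with !range !{i64 0, i64 10} constrains the unknown to the
    // half-open interval [0, 10).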
Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index size type, this can cause
      // NS to be larger than BitWidth. So compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isZero())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isZero())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    // The range of a Phi is a subset of the union of the ranges of its
    // inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not recurse forever over cyclic Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and is then changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
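// A worked example of the helper below (illustrative numbers only): for i8,
// StartRange = [10, 20), Step = 3, MaxBECount = 5 and Signed = true, the
// total change is Offset = 15, the moved boundary is 19 + 15 = 34, and the
// result is [10, 35).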
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if the maximum total change (Step * MaxBECount) exceeds the full
  // span of BitWidth. If it does, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The check above
  // guarantees no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximal value of StartRange if the expression is decreasing and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
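  // E.g. (illustrative): if the signed range of Step is [-2, 3), the helper
  // is invoked once with -2 and once with 2, and the two resulting ranges
  // are unioned.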
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer no-wrap from some exit for
  // which we do not know the max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1:   RangeMin   ...   Start V1 ... VN End   ...   RangeMax;
  // Case 2:   RangeMin Vk ... V1 Start   ...   End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // It is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will know nothing
  // useful even if we prove what we want to prove.
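  // For instance (illustrative): for {10,+,2}<nsw> on i8 with End = 20, the
  // checks below prove Case 1 (Start <= End with a positive step), so
  // RangeBetween = [10, 21) is a sound result.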
6420 if (RangeBetween.isFullSet()) 6421 return RangeBetween; 6422 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 6423 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 6424 : RangeBetween.isWrappedSet(); 6425 if (IsWrappedSet) 6426 return ConstantRange::getFull(BitWidth); 6427 6428 if (isKnownPositive(Step) && 6429 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6430 return RangeBetween; 6431 else if (isKnownNegative(Step) && 6432 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6433 return RangeBetween; 6434 return ConstantRange::getFull(BitWidth); 6435 } 6436 6437 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6438 const SCEV *Step, 6439 const SCEV *MaxBECount, 6440 unsigned BitWidth) { 6441 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6442 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6443 6444 struct SelectPattern { 6445 Value *Condition = nullptr; 6446 APInt TrueValue; 6447 APInt FalseValue; 6448 6449 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6450 const SCEV *S) { 6451 Optional<unsigned> CastOp; 6452 APInt Offset(BitWidth, 0); 6453 6454 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6455 "Should be!"); 6456 6457 // Peel off a constant offset: 6458 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6459 // In the future we could consider being smarter here and handle 6460 // {Start+Step,+,Step} too. 6461 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6462 return; 6463 6464 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6465 S = SA->getOperand(1); 6466 } 6467 6468 // Peel off a cast operation 6469 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6470 CastOp = SCast->getSCEVType(); 6471 S = SCast->getOperand(); 6472 } 6473 6474 using namespace llvm::PatternMatch; 6475 6476 auto *SU = dyn_cast<SCEVUnknown>(S); 6477 const APInt *TrueVal, *FalseVal; 6478 if (!SU || 6479 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6480 m_APInt(FalseVal)))) { 6481 Condition = nullptr; 6482 return; 6483 } 6484 6485 TrueValue = *TrueVal; 6486 FalseValue = *FalseVal; 6487 6488 // Re-apply the cast we peeled off earlier 6489 if (CastOp.hasValue()) 6490 switch (*CastOp) { 6491 default: 6492 llvm_unreachable("Unknown SCEV cast type!"); 6493 6494 case scTruncate: 6495 TrueValue = TrueValue.trunc(BitWidth); 6496 FalseValue = FalseValue.trunc(BitWidth); 6497 break; 6498 case scZeroExtend: 6499 TrueValue = TrueValue.zext(BitWidth); 6500 FalseValue = FalseValue.zext(BitWidth); 6501 break; 6502 case scSignExtend: 6503 TrueValue = TrueValue.sext(BitWidth); 6504 FalseValue = FalseValue.sext(BitWidth); 6505 break; 6506 } 6507 6508 // Re-apply the constant offset we peeled off earlier 6509 TrueValue += Offset; 6510 FalseValue += Offset; 6511 } 6512 6513 bool isRecognized() { return Condition != nullptr; } 6514 }; 6515 6516 SelectPattern StartPattern(*this, BitWidth, Start); 6517 if (!StartPattern.isRecognized()) 6518 return ConstantRange::getFull(BitWidth); 6519 6520 SelectPattern StepPattern(*this, BitWidth, Step); 6521 if (!StepPattern.isRecognized()) 6522 return ConstantRange::getFull(BitWidth); 6523 6524 if (StartPattern.Condition != StepPattern.Condition) { 6525 // We don't handle this case today; but we could, by considering four 6526 // possibilities below instead of two. I'm not sure if there are cases where 6527 // that will help over what getRange already does, though. 6528 return ConstantRange::getFull(BitWidth); 6529 } 6530 6531 // NB! 
Calling ScalarEvolution::getConstant is fine, but we should not try to 6532 // construct arbitrary general SCEV expressions here. This function is called 6533 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6534 // say) can end up caching a suboptimal value. 6535 6536 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6537 // C2352 and C2512 (otherwise it isn't needed). 6538 6539 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6540 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6541 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6542 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6543 6544 ConstantRange TrueRange = 6545 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6546 ConstantRange FalseRange = 6547 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6548 6549 return TrueRange.unionWith(FalseRange); 6550 } 6551 6552 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6553 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6554 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6555 6556 // Return early if there are no flags to propagate to the SCEV. 6557 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6558 if (BinOp->hasNoUnsignedWrap()) 6559 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6560 if (BinOp->hasNoSignedWrap()) 6561 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6562 if (Flags == SCEV::FlagAnyWrap) 6563 return SCEV::FlagAnyWrap; 6564 6565 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6566 } 6567 6568 const Instruction * 6569 ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) { 6570 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) 6571 return &*AddRec->getLoop()->getHeader()->begin(); 6572 if (auto *U = dyn_cast<SCEVUnknown>(S)) 6573 if (auto *I = dyn_cast<Instruction>(U->getValue())) 6574 return I; 6575 return nullptr; 6576 } 6577 6578 /// Fills \p Ops with unique operands of \p S, if it has operands. If not, 6579 /// \p Ops remains unmodified. 6580 static void collectUniqueOps(const SCEV *S, 6581 SmallVectorImpl<const SCEV *> &Ops) { 6582 SmallPtrSet<const SCEV *, 4> Unique; 6583 auto InsertUnique = [&](const SCEV *S) { 6584 if (Unique.insert(S).second) 6585 Ops.push_back(S); 6586 }; 6587 if (auto *S2 = dyn_cast<SCEVCastExpr>(S)) 6588 for (auto *Op : S2->operands()) 6589 InsertUnique(Op); 6590 else if (auto *S2 = dyn_cast<SCEVNAryExpr>(S)) 6591 for (auto *Op : S2->operands()) 6592 InsertUnique(Op); 6593 else if (auto *S2 = dyn_cast<SCEVUDivExpr>(S)) 6594 for (auto *Op : S2->operands()) 6595 InsertUnique(Op); 6596 } 6597 6598 const Instruction * 6599 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops, 6600 bool &Precise) { 6601 Precise = true; 6602 // Do a bounded search of the def relation of the requested SCEVs. 6603 SmallSet<const SCEV *, 16> Visited; 6604 SmallVector<const SCEV *> Worklist; 6605 auto pushOp = [&](const SCEV *S) { 6606 if (!Visited.insert(S).second) 6607 return; 6608 // Threshold of 30 here is arbitrary. 
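    // Once the cap is hit we stop queueing operands, so the bound computed
    // below only reflects the sub-expressions visited so far; Precise is
    // cleared to tell the caller about that.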
6609 if (Visited.size() > 30) { 6610 Precise = false; 6611 return; 6612 } 6613 Worklist.push_back(S); 6614 }; 6615 6616 for (auto *S : Ops) 6617 pushOp(S); 6618 6619 const Instruction *Bound = nullptr; 6620 while (!Worklist.empty()) { 6621 auto *S = Worklist.pop_back_val(); 6622 if (auto *DefI = getNonTrivialDefiningScopeBound(S)) { 6623 if (!Bound || DT.dominates(Bound, DefI)) 6624 Bound = DefI; 6625 } else { 6626 SmallVector<const SCEV *, 4> Ops; 6627 collectUniqueOps(S, Ops); 6628 for (auto *Op : Ops) 6629 pushOp(Op); 6630 } 6631 } 6632 return Bound ? Bound : &*F.getEntryBlock().begin(); 6633 } 6634 6635 const Instruction * 6636 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) { 6637 bool Discard; 6638 return getDefiningScopeBound(Ops, Discard); 6639 } 6640 6641 bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A, 6642 const Instruction *B) { 6643 if (A->getParent() == B->getParent() && 6644 isGuaranteedToTransferExecutionToSuccessor(A->getIterator(), 6645 B->getIterator())) 6646 return true; 6647 6648 auto *BLoop = LI.getLoopFor(B->getParent()); 6649 if (BLoop && BLoop->getHeader() == B->getParent() && 6650 BLoop->getLoopPreheader() == A->getParent() && 6651 isGuaranteedToTransferExecutionToSuccessor(A->getIterator(), 6652 A->getParent()->end()) && 6653 isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(), 6654 B->getIterator())) 6655 return true; 6656 return false; 6657 } 6658 6659 6660 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6661 // Only proceed if we can prove that I does not yield poison. 6662 if (!programUndefinedIfPoison(I)) 6663 return false; 6664 6665 // At this point we know that if I is executed, then it does not wrap 6666 // according to at least one of NSW or NUW. If I is not executed, then we do 6667 // not know if the calculation that I represents would wrap. Multiple 6668 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6669 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6670 // derived from other instructions that map to the same SCEV. We cannot make 6671 // that guarantee for cases where I is not executed. So we need to find a 6672 // upper bound on the defining scope for the SCEV, and prove that I is 6673 // executed every time we enter that scope. When the bounding scope is a 6674 // loop (the common case), this is equivalent to proving I executes on every 6675 // iteration of that loop. 6676 SmallVector<const SCEV *> SCEVOps; 6677 for (const Use &Op : I->operands()) { 6678 // I could be an extractvalue from a call to an overflow intrinsic. 6679 // TODO: We can do better here in some cases. 6680 if (isSCEVable(Op->getType())) 6681 SCEVOps.push_back(getSCEV(Op)); 6682 } 6683 auto *DefI = getDefiningScopeBound(SCEVOps); 6684 return isGuaranteedToTransferExecutionTo(DefI, I); 6685 } 6686 6687 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6688 // If we know that \c I can never be poison period, then that's enough. 6689 if (isSCEVExprNeverPoison(I)) 6690 return true; 6691 6692 // For an add recurrence specifically, we assume that infinite loops without 6693 // side effects are undefined behavior, and then reason as follows: 6694 // 6695 // If the add recurrence is poison in any iteration, it is poison on all 6696 // future iterations (since incrementing poison yields poison). 
If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we
  // now have the ability to "choose" whether the backedge is taken or not
  // (by choosing a sufficiently evil value for the poison feeding into the
  // branch) for every iteration including and after the one in which \p I
  // first became poison. There are two possibilities (let's call the
  // iteration in which \p I first became poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite
  //    number of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative.
It's only *specific* 6786 // side effects which are well defined in infinite loops. 6787 return isMustProgress(L) && loopHasNoSideEffects(L); 6788 } 6789 6790 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6791 if (!isSCEVable(V->getType())) 6792 return getUnknown(V); 6793 6794 if (Instruction *I = dyn_cast<Instruction>(V)) { 6795 // Don't attempt to analyze instructions in blocks that aren't 6796 // reachable. Such instructions don't matter, and they aren't required 6797 // to obey basic rules for definitions dominating uses which this 6798 // analysis depends on. 6799 if (!DT.isReachableFromEntry(I->getParent())) 6800 return getUnknown(UndefValue::get(V->getType())); 6801 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6802 return getConstant(CI); 6803 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6804 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6805 else if (!isa<ConstantExpr>(V)) 6806 return getUnknown(V); 6807 6808 Operator *U = cast<Operator>(V); 6809 if (auto BO = MatchBinaryOp(U, DT)) { 6810 switch (BO->Opcode) { 6811 case Instruction::Add: { 6812 // The simple thing to do would be to just call getSCEV on both operands 6813 // and call getAddExpr with the result. However if we're looking at a 6814 // bunch of things all added together, this can be quite inefficient, 6815 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6816 // Instead, gather up all the operands and make a single getAddExpr call. 6817 // LLVM IR canonical form means we need only traverse the left operands. 6818 SmallVector<const SCEV *, 4> AddOps; 6819 do { 6820 if (BO->Op) { 6821 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6822 AddOps.push_back(OpSCEV); 6823 break; 6824 } 6825 6826 // If a NUW or NSW flag can be applied to the SCEV for this 6827 // addition, then compute the SCEV for this addition by itself 6828 // with a separate call to getAddExpr. We need to do that 6829 // instead of pushing the operands of the addition onto AddOps, 6830 // since the flags are only known to apply to this particular 6831 // addition - they may not apply to other additions that can be 6832 // formed with operands from AddOps. 
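          // For example (illustrative): in ((a + b)<nuw>) + c, the <nuw>
          // covers only the inner addition; a single three-operand
          // getAddExpr(a, b, c) call could not carry it soundly.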
6833 const SCEV *RHS = getSCEV(BO->RHS); 6834 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6835 if (Flags != SCEV::FlagAnyWrap) { 6836 const SCEV *LHS = getSCEV(BO->LHS); 6837 if (BO->Opcode == Instruction::Sub) 6838 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6839 else 6840 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6841 break; 6842 } 6843 } 6844 6845 if (BO->Opcode == Instruction::Sub) 6846 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6847 else 6848 AddOps.push_back(getSCEV(BO->RHS)); 6849 6850 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6851 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6852 NewBO->Opcode != Instruction::Sub)) { 6853 AddOps.push_back(getSCEV(BO->LHS)); 6854 break; 6855 } 6856 BO = NewBO; 6857 } while (true); 6858 6859 return getAddExpr(AddOps); 6860 } 6861 6862 case Instruction::Mul: { 6863 SmallVector<const SCEV *, 4> MulOps; 6864 do { 6865 if (BO->Op) { 6866 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6867 MulOps.push_back(OpSCEV); 6868 break; 6869 } 6870 6871 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6872 if (Flags != SCEV::FlagAnyWrap) { 6873 MulOps.push_back( 6874 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6875 break; 6876 } 6877 } 6878 6879 MulOps.push_back(getSCEV(BO->RHS)); 6880 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6881 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6882 MulOps.push_back(getSCEV(BO->LHS)); 6883 break; 6884 } 6885 BO = NewBO; 6886 } while (true); 6887 6888 return getMulExpr(MulOps); 6889 } 6890 case Instruction::UDiv: 6891 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6892 case Instruction::URem: 6893 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6894 case Instruction::Sub: { 6895 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6896 if (BO->Op) 6897 Flags = getNoWrapFlagsFromUB(BO->Op); 6898 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6899 } 6900 case Instruction::And: 6901 // For an expression like x&255 that merely masks off the high bits, 6902 // use zext(trunc(x)) as the SCEV expression. 6903 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6904 if (CI->isZero()) 6905 return getSCEV(BO->RHS); 6906 if (CI->isMinusOne()) 6907 return getSCEV(BO->LHS); 6908 const APInt &A = CI->getValue(); 6909 6910 // Instcombine's ShrinkDemandedConstant may strip bits out of 6911 // constants, obscuring what would otherwise be a low-bits mask. 6912 // Use computeKnownBits to compute what ShrinkDemandedConstant 6913 // knew about to reconstruct a low-bits mask value. 6914 unsigned LZ = A.countLeadingZeros(); 6915 unsigned TZ = A.countTrailingZeros(); 6916 unsigned BitWidth = A.getBitWidth(); 6917 KnownBits Known(BitWidth); 6918 computeKnownBits(BO->LHS, Known, getDataLayout(), 6919 0, &AC, nullptr, &DT); 6920 6921 APInt EffectiveMask = 6922 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6923 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6924 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6925 const SCEV *LHS = getSCEV(BO->LHS); 6926 const SCEV *ShiftedLHS = nullptr; 6927 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6928 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6929 // For an expression like (x * 8) & 8, simplify the multiply. 
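              // Working through (x * 8) & 8 (illustrative): MulZeros = 3 and
              // TZ = 3, so GCD = 3 and DivAmt = 1; the constant multiplier
              // becomes 1 and the trailing udiv below divides by 1.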
6930 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6931 unsigned GCD = std::min(MulZeros, TZ); 6932 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6933 SmallVector<const SCEV*, 4> MulOps; 6934 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6935 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6936 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6937 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6938 } 6939 } 6940 if (!ShiftedLHS) 6941 ShiftedLHS = getUDivExpr(LHS, MulCount); 6942 return getMulExpr( 6943 getZeroExtendExpr( 6944 getTruncateExpr(ShiftedLHS, 6945 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6946 BO->LHS->getType()), 6947 MulCount); 6948 } 6949 } 6950 break; 6951 6952 case Instruction::Or: 6953 // If the RHS of the Or is a constant, we may have something like: 6954 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6955 // optimizations will transparently handle this case. 6956 // 6957 // In order for this transformation to be safe, the LHS must be of the 6958 // form X*(2^n) and the Or constant must be less than 2^n. 6959 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6960 const SCEV *LHS = getSCEV(BO->LHS); 6961 const APInt &CIVal = CI->getValue(); 6962 if (GetMinTrailingZeros(LHS) >= 6963 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6964 // Build a plain add SCEV. 6965 return getAddExpr(LHS, getSCEV(CI), 6966 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6967 } 6968 } 6969 break; 6970 6971 case Instruction::Xor: 6972 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6973 // If the RHS of xor is -1, then this is a not operation. 6974 if (CI->isMinusOne()) 6975 return getNotSCEV(getSCEV(BO->LHS)); 6976 6977 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6978 // This is a variant of the check for xor with -1, and it handles 6979 // the case where instcombine has trimmed non-demanded bits out 6980 // of an xor with -1. 6981 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6982 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6983 if (LBO->getOpcode() == Instruction::And && 6984 LCI->getValue() == CI->getValue()) 6985 if (const SCEVZeroExtendExpr *Z = 6986 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6987 Type *UTy = BO->LHS->getType(); 6988 const SCEV *Z0 = Z->getOperand(); 6989 Type *Z0Ty = Z0->getType(); 6990 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6991 6992 // If C is a low-bits mask, the zero extend is serving to 6993 // mask off the high bits. Complement the operand and 6994 // re-apply the zext. 6995 if (CI->getValue().isMask(Z0TySize)) 6996 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6997 6998 // If C is a single bit, it may be in the sign-bit position 6999 // before the zero-extend. In this case, represent the xor 7000 // using an add, which is equivalent, and re-apply the zext. 7001 APInt Trunc = CI->getValue().trunc(Z0TySize); 7002 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 7003 Trunc.isSignMask()) 7004 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 7005 UTy); 7006 } 7007 } 7008 break; 7009 7010 case Instruction::Shl: 7011 // Turn shift left of a constant amount into a multiply. 7012 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 7013 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 7014 7015 // If the shift count is not less than the bitwidth, the result of 7016 // the shift is undefined. 
Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // We can safely preserve the nuw flag in all cases. It's also safe to
      // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
      // requires special handling. It can be preserved as long as we're not
      // left shifting by bitwidth - 1.
      auto Flags = SCEV::FlagAnyWrap;
      if (BO->Op) {
        auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
        if ((MulFlags & SCEV::FlagNSW) &&
            ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
          Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
        if (MulFlags & SCEV::FlagNUW)
          Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
      }

      Constant *X = ConstantInt::get(
          getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
    }
    break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
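      // E.g. (illustrative): sext((a - b)<nsw>) becomes sext(a) - sext(b),
      // computed below via getMinusSCEV with FlagNSW.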
7109 // 7110 // NOTE: This is effectively duplicating this logic from getSignExtend: 7111 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 7112 // but by that point the NSW information has potentially been lost. 7113 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 7114 Type *Ty = U->getType(); 7115 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 7116 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 7117 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 7118 } 7119 } 7120 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 7121 7122 case Instruction::BitCast: 7123 // BitCasts are no-op casts so we just eliminate the cast. 7124 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 7125 return getSCEV(U->getOperand(0)); 7126 break; 7127 7128 case Instruction::PtrToInt: { 7129 // Pointer to integer cast is straight-forward, so do model it. 7130 const SCEV *Op = getSCEV(U->getOperand(0)); 7131 Type *DstIntTy = U->getType(); 7132 // But only if effective SCEV (integer) type is wide enough to represent 7133 // all possible pointer values. 7134 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); 7135 if (isa<SCEVCouldNotCompute>(IntOp)) 7136 return getUnknown(V); 7137 return IntOp; 7138 } 7139 case Instruction::IntToPtr: 7140 // Just don't deal with inttoptr casts. 7141 return getUnknown(V); 7142 7143 case Instruction::SDiv: 7144 // If both operands are non-negative, this is just an udiv. 7145 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 7146 isKnownNonNegative(getSCEV(U->getOperand(1)))) 7147 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 7148 break; 7149 7150 case Instruction::SRem: 7151 // If both operands are non-negative, this is just an urem. 7152 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 7153 isKnownNonNegative(getSCEV(U->getOperand(1)))) 7154 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 7155 break; 7156 7157 case Instruction::GetElementPtr: 7158 return createNodeForGEP(cast<GEPOperator>(U)); 7159 7160 case Instruction::PHI: 7161 return createNodeForPHI(cast<PHINode>(U)); 7162 7163 case Instruction::Select: 7164 // U can also be a select constant expr, which let fall through. Since 7165 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 7166 // constant expressions cannot have instructions as operands, we'd have 7167 // returned getUnknown for a select constant expressions anyway. 
7168 if (isa<Instruction>(U)) 7169 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 7170 U->getOperand(1), U->getOperand(2)); 7171 break; 7172 7173 case Instruction::Call: 7174 case Instruction::Invoke: 7175 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 7176 return getSCEV(RV); 7177 7178 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 7179 switch (II->getIntrinsicID()) { 7180 case Intrinsic::abs: 7181 return getAbsExpr( 7182 getSCEV(II->getArgOperand(0)), 7183 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); 7184 case Intrinsic::umax: 7185 return getUMaxExpr(getSCEV(II->getArgOperand(0)), 7186 getSCEV(II->getArgOperand(1))); 7187 case Intrinsic::umin: 7188 return getUMinExpr(getSCEV(II->getArgOperand(0)), 7189 getSCEV(II->getArgOperand(1))); 7190 case Intrinsic::smax: 7191 return getSMaxExpr(getSCEV(II->getArgOperand(0)), 7192 getSCEV(II->getArgOperand(1))); 7193 case Intrinsic::smin: 7194 return getSMinExpr(getSCEV(II->getArgOperand(0)), 7195 getSCEV(II->getArgOperand(1))); 7196 case Intrinsic::usub_sat: { 7197 const SCEV *X = getSCEV(II->getArgOperand(0)); 7198 const SCEV *Y = getSCEV(II->getArgOperand(1)); 7199 const SCEV *ClampedY = getUMinExpr(X, Y); 7200 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); 7201 } 7202 case Intrinsic::uadd_sat: { 7203 const SCEV *X = getSCEV(II->getArgOperand(0)); 7204 const SCEV *Y = getSCEV(II->getArgOperand(1)); 7205 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); 7206 return getAddExpr(ClampedX, Y, SCEV::FlagNUW); 7207 } 7208 case Intrinsic::start_loop_iterations: 7209 // A start_loop_iterations is just equivalent to the first operand for 7210 // SCEV purposes. 7211 return getSCEV(II->getArgOperand(0)); 7212 default: 7213 break; 7214 } 7215 } 7216 break; 7217 } 7218 7219 return getUnknown(V); 7220 } 7221 7222 //===----------------------------------------------------------------------===// 7223 // Iteration Count Computation Code 7224 // 7225 7226 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount, 7227 bool Extend) { 7228 if (isa<SCEVCouldNotCompute>(ExitCount)) 7229 return getCouldNotCompute(); 7230 7231 auto *ExitCountType = ExitCount->getType(); 7232 assert(ExitCountType->isIntegerTy()); 7233 7234 if (!Extend) 7235 return getAddExpr(ExitCount, getOne(ExitCountType)); 7236 7237 auto *WiderType = Type::getIntNTy(ExitCountType->getContext(), 7238 1 + ExitCountType->getScalarSizeInBits()); 7239 return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType), 7240 getOne(WiderType)); 7241 } 7242 7243 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 7244 if (!ExitCount) 7245 return 0; 7246 7247 ConstantInt *ExitConst = ExitCount->getValue(); 7248 7249 // Guard against huge trip counts. 7250 if (ExitConst->getValue().getActiveBits() > 32) 7251 return 0; 7252 7253 // In case of integer overflow, this returns 0, which is correct. 
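  // E.g. (illustrative): an exit count of 0xFFFFFFFF passes the guard above
  // (it has exactly 32 active bits), and the +1 below wraps to 0, i.e.
  // "unknown trip count".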
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
  return getConstantTripCount(ExitCount);
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) {
  // We cannot infer a trip count from array accesses in an irregular loop.
  // FIXME: It is hard to infer a loop bound from arrays accessed in nested
  // loops.
  if (!L->isLoopSimplifyForm() || !L->isInnermost())
    return getCouldNotCompute();

  // FIXME: To keep the common case simple, we only analyze loops that have
  // a single exiting block, and that block must be the latch. This makes it
  // easy to capture loops whose memory accesses are executed on every
  // iteration.
  const BasicBlock *LoopLatch = L->getLoopLatch();
  assert(LoopLatch && "Guaranteed by loop-simplify form.");
  if (L->getExitingBlock() != LoopLatch)
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();
  SmallVector<const SCEV *> InferCountColl;
  for (auto *BB : L->getBlocks()) {
    // At this point we know that the loop is in simplified form with a
    // single exiting block. Only infer from memory operations that must be
    // executed on every iteration: if MemAccessBB dominates the latch, its
    // maximum execution count equals the latch's maximum execution count.
    // If MemAccessBB does not dominate the latch, skip it.
    //                Entry
    //                  │
    //            ┌─────▼─────┐
    //            │Loop Header◄─────┐
    //            └──┬──────┬─┘     │
    //               │      │       │
    //      ┌────────▼──┐ ┌─▼─────┐ │
    //      │MemAccessBB│ │OtherBB│ │
    //      └────────┬──┘ └─┬─────┘ │
    //               │      │       │
    //             ┌─▼──────▼─┐     │
    //             │Loop Latch├─────┘
    //             └────┬─────┘
    //                  ▼
    //                 Exit
    if (!DT.dominates(BB, LoopLatch))
      continue;

    for (Instruction &Inst : *BB) {
      // Find a memory operation instruction.
      auto *GEP = getLoadStorePointerOperand(&Inst);
      if (!GEP)
        continue;

      auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst));
      // Do not infer from a scalar type, e.g. "ElemSize = sizeof()".
      if (!ElemSize)
        continue;

      // Use an existing polynomial recurrence on the trip count.
      auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP));
      if (!AddRec)
        continue;
      auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec));
      auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this));
      if (!ArrBase || !Step)
        continue;
      assert(isLoopInvariant(ArrBase, L) && "See addrec definition");

      // Only handle {%array,+,step}.
      // FIXME: {(SCEVAddRecExpr),+,step} cannot be analyzed here.
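      // E.g. (illustrative): {%array,+,4} is accepted, while an access like
      // {%array + 8,+,4}, whose start is offset from the base, is skipped by
      // the check below.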
      if (AddRec->getStart() != ArrBase)
        continue;

      // Skip memory access patterns that have gaps, that revisit the same
      // location, or whose GEP index wraps around.
      if (Step->getAPInt().getActiveBits() > 32 ||
          Step->getAPInt().getZExtValue() !=
              ElemSize->getAPInt().getZExtValue() ||
          Step->isZero() || Step->getAPInt().isNegative())
        continue;

      // Only infer from stack arrays with a known size, and make sure the
      // alloca instruction is not executed inside the loop.
      AllocaInst *AllocateInst = dyn_cast<AllocaInst>(ArrBase->getValue());
      if (!AllocateInst || L->contains(AllocateInst->getParent()))
        continue;

      // Only handle a plain, singly-allocated array.
      auto *Ty = dyn_cast<ArrayType>(AllocateInst->getAllocatedType());
      auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize());
      if (!Ty || !ArrSize || !ArrSize->isOne())
        continue;
      // Also make sure the step matches the size of the allocated element
      // type.
      const PointerType *GEPT = dyn_cast<PointerType>(GEP->getType());
      if (Ty->getElementType() != GEPT->getElementType())
        continue;

      // FIXME: Since GEP indices are silently zext'd to the indexing type,
      // a narrow GEP index may wrap around rather than increase strictly;
      // we should ensure that the index increases strictly with each loop
      // iteration.
      // Now we can infer a maximum execution count of MemSize / StepSize.
      const SCEV *MemSize =
          getConstant(Step->getType(), DL.getTypeAllocSize(Ty));
      auto *MaxExeCount =
          dyn_cast<SCEVConstant>(getUDivCeilSCEV(MemSize, Step));
      if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32)
        continue;

      // If the loop reaches this maximum number of executions, accessing
      // bytes outside the statically allocated size would be immediate UB,
      // but entering the loop header one more time is still allowed.
      auto *InferCount = dyn_cast<SCEVConstant>(
          getAddExpr(MaxExeCount, getOne(MaxExeCount->getType())));
      // Discard inferred counts that do not fit in 32 bits.
      if (!InferCount || InferCount->getAPInt().getActiveBits() > 32)
        continue;

      InferCountColl.push_back(InferCount);
    }
  }

  if (InferCountColl.size() == 0)
    return getCouldNotCompute();

  return getUMinFromMismatchedTypes(InferCountColl);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  Optional<unsigned> Res = None;
  for (auto *ExitingBB : ExitingBlocks) {
    unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
    if (!Res)
      Res = Multiple;
    Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
  }
  return Res.getValueOr(1);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                                       const SCEV *ExitCount) {
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count.
  const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
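    // E.g. (illustrative): a trip count of the form 12 * %n has at least two
    // known trailing zero bits, so the returned multiple is at least 4.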
    return 1U << std::min((uint32_t)31,
                          GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  return getSmallConstantTripMultiple(L, ExitCount);
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void PushLoopPHIs(const Loop *L,
                         SmallVectorImpl<Instruction *> &Worklist,
                         SmallPtrSetImpl<Instruction *> &Visited) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    if (Visited.insert(&PN).second)
      Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  // must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In builds without statistics, these counters are otherwise unused.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getConstantMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This invalidation is not necessary for correctness, and is
  // only done to produce more precise results.
  if (Result.hasAnyInfo()) {
    // Invalidate any expression using an addrec in this loop.
    SmallVector<const SCEV *, 8> ToForget;
    auto LoopUsersIt = LoopUsers.find(L);
    if (LoopUsersIt != LoopUsers.end())
      append_range(ToForget, LoopUsersIt->second);
    forgetMemoizedResults(ToForget);

    // Invalidate constant-evolved loop header phis.
    for (PHINode &PN : L->getHeader()->phis())
      ConstantEvolutionLoopExitValue.erase(&PN);
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
7590 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7591 } 7592 7593 void ScalarEvolution::forgetAllLoops() { 7594 // This method is intended to forget all info about loops. It should 7595 // invalidate caches as if the following happened: 7596 // - The trip counts of all loops have changed arbitrarily 7597 // - Every llvm::Value has been updated in place to produce a different 7598 // result. 7599 BackedgeTakenCounts.clear(); 7600 PredicatedBackedgeTakenCounts.clear(); 7601 BECountUsers.clear(); 7602 LoopPropertiesCache.clear(); 7603 ConstantEvolutionLoopExitValue.clear(); 7604 ValueExprMap.clear(); 7605 ValuesAtScopes.clear(); 7606 ValuesAtScopesUsers.clear(); 7607 LoopDispositions.clear(); 7608 BlockDispositions.clear(); 7609 UnsignedRanges.clear(); 7610 SignedRanges.clear(); 7611 ExprValueMap.clear(); 7612 HasRecMap.clear(); 7613 MinTrailingZerosCache.clear(); 7614 PredicatedSCEVRewrites.clear(); 7615 } 7616 7617 void ScalarEvolution::forgetLoop(const Loop *L) { 7618 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7619 SmallVector<Instruction *, 32> Worklist; 7620 SmallPtrSet<Instruction *, 16> Visited; 7621 SmallVector<const SCEV *, 16> ToForget; 7622 7623 // Iterate over all the loops and sub-loops to drop SCEV information. 7624 while (!LoopWorklist.empty()) { 7625 auto *CurrL = LoopWorklist.pop_back_val(); 7626 7627 // Drop any stored trip count value. 7628 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false); 7629 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true); 7630 7631 // Drop information about predicated SCEV rewrites for this loop. 7632 for (auto I = PredicatedSCEVRewrites.begin(); 7633 I != PredicatedSCEVRewrites.end();) { 7634 std::pair<const SCEV *, const Loop *> Entry = I->first; 7635 if (Entry.second == CurrL) 7636 PredicatedSCEVRewrites.erase(I++); 7637 else 7638 ++I; 7639 } 7640 7641 auto LoopUsersItr = LoopUsers.find(CurrL); 7642 if (LoopUsersItr != LoopUsers.end()) { 7643 ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(), 7644 LoopUsersItr->second.end()); 7645 LoopUsers.erase(LoopUsersItr); 7646 } 7647 7648 // Drop information about expressions based on loop-header PHIs. 7649 PushLoopPHIs(CurrL, Worklist, Visited); 7650 7651 while (!Worklist.empty()) { 7652 Instruction *I = Worklist.pop_back_val(); 7653 7654 ValueExprMapType::iterator It = 7655 ValueExprMap.find_as(static_cast<Value *>(I)); 7656 if (It != ValueExprMap.end()) { 7657 eraseValueFromMap(It->first); 7658 ToForget.push_back(It->second); 7659 if (PHINode *PN = dyn_cast<PHINode>(I)) 7660 ConstantEvolutionLoopExitValue.erase(PN); 7661 } 7662 7663 PushDefUseChildren(I, Worklist, Visited); 7664 } 7665 7666 LoopPropertiesCache.erase(CurrL); 7667 // Forget all contained loops too, to avoid dangling entries in the 7668 // ValuesAtScopes map. 7669 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7670 } 7671 forgetMemoizedResults(ToForget); 7672 } 7673 7674 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7675 while (Loop *Parent = L->getParentLoop()) 7676 L = Parent; 7677 forgetLoop(L); 7678 } 7679 7680 void ScalarEvolution::forgetValue(Value *V) { 7681 Instruction *I = dyn_cast<Instruction>(V); 7682 if (!I) return; 7683 7684 // Drop information about expressions based on loop-header PHIs. 
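  // For example, forgetting %iv also drops cached SCEVs for values computed
  // from it, such as "%iv.next = add i32 %iv, 1", via the def-use walk below.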
7685 SmallVector<Instruction *, 16> Worklist; 7686 SmallPtrSet<Instruction *, 8> Visited; 7687 SmallVector<const SCEV *, 8> ToForget; 7688 Worklist.push_back(I); 7689 Visited.insert(I); 7690 7691 while (!Worklist.empty()) { 7692 I = Worklist.pop_back_val(); 7693 ValueExprMapType::iterator It = 7694 ValueExprMap.find_as(static_cast<Value *>(I)); 7695 if (It != ValueExprMap.end()) { 7696 eraseValueFromMap(It->first); 7697 ToForget.push_back(It->second); 7698 if (PHINode *PN = dyn_cast<PHINode>(I)) 7699 ConstantEvolutionLoopExitValue.erase(PN); 7700 } 7701 7702 PushDefUseChildren(I, Worklist, Visited); 7703 } 7704 forgetMemoizedResults(ToForget); 7705 } 7706 7707 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 7708 LoopDispositions.clear(); 7709 } 7710 7711 /// Get the exact loop backedge taken count considering all loop exits. A 7712 /// computable result can only be returned for loops with all exiting blocks 7713 /// dominating the latch. howFarToZero assumes that the limit of each loop test 7714 /// is never skipped. This is a valid assumption as long as the loop exits via 7715 /// that test. For precise results, it is the caller's responsibility to specify 7716 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 7717 const SCEV * 7718 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 7719 SCEVUnionPredicate *Preds) const { 7720 // If any exits were not computable, the loop is not computable. 7721 if (!isComplete() || ExitNotTaken.empty()) 7722 return SE->getCouldNotCompute(); 7723 7724 const BasicBlock *Latch = L->getLoopLatch(); 7725 // All exiting blocks we have collected must dominate the only backedge. 7726 if (!Latch) 7727 return SE->getCouldNotCompute(); 7728 7729 // All exiting blocks we have gathered dominate loop's latch, so exact trip 7730 // count is simply a minimum out of all these calculated exit counts. 7731 SmallVector<const SCEV *, 2> Ops; 7732 for (auto &ENT : ExitNotTaken) { 7733 const SCEV *BECount = ENT.ExactNotTaken; 7734 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 7735 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 7736 "We should only have known counts for exiting blocks that dominate " 7737 "latch!"); 7738 7739 Ops.push_back(BECount); 7740 7741 if (Preds && !ENT.hasAlwaysTruePredicate()) 7742 Preds->add(ENT.Predicate.get()); 7743 7744 assert((Preds || ENT.hasAlwaysTruePredicate()) && 7745 "Predicate should be always true!"); 7746 } 7747 7748 return SE->getUMinFromMismatchedTypes(Ops); 7749 } 7750 7751 /// Get the exact not taken count for this loop exit. 7752 const SCEV * 7753 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 7754 ScalarEvolution *SE) const { 7755 for (auto &ENT : ExitNotTaken) 7756 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7757 return ENT.ExactNotTaken; 7758 7759 return SE->getCouldNotCompute(); 7760 } 7761 7762 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 7763 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 7764 for (auto &ENT : ExitNotTaken) 7765 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7766 return ENT.MaxNotTaken; 7767 7768 return SE->getCouldNotCompute(); 7769 } 7770 7771 /// getConstantMax - Get the constant max backedge taken count for the loop. 
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue))
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
          isa<SCEVConstant>(getConstantMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getConstantMax();
}

const SCEV *
ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
                                                   ScalarEvolution *SE) {
  if (!SymbolicMax)
    SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
  return SymbolicMax;
}

bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
    ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExitLimit(E, E, false, None) {}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  // If we prove the max count is zero, so is the symbolic bound. This happens
  // in practice due to differences in a) how context sensitive we've chosen
  // to be and b) how we reason about bounds implied by UB.
  if (MaxNotTaken->isZero())
    ExactNotTaken = MaxNotTaken;

  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
         "Backedge count should be int");
  assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
         "Max backedge count should be int");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {}

/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant); ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if (ExitIfTrue == CI->isZero())
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);

  // Remember which SCEVs are used in exit limits for invalidation purposes.
  // We only care about non-constant SCEVs here, so we can ignore
  // EL.MaxNotTaken and MaxBECount, which must be SCEVConstant.
  for (const auto &Pair : ExitCounts)
    if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
      BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For a switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
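          // For instance, two switch cases may branch to two different
          // blocks outside the loop; we make no attempt to reason about
          // which of those exits is taken first.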
7992 return getCouldNotCompute(); 7993 Exit = SBB; 7994 } 7995 assert(Exit && "Exiting block must have at least one exit"); 7996 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7997 /*ControlsExit=*/IsOnlyExit); 7998 } 7999 8000 return getCouldNotCompute(); 8001 } 8002 8003 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 8004 const Loop *L, Value *ExitCond, bool ExitIfTrue, 8005 bool ControlsExit, bool AllowPredicates) { 8006 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 8007 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 8008 ControlsExit, AllowPredicates); 8009 } 8010 8011 Optional<ScalarEvolution::ExitLimit> 8012 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 8013 bool ExitIfTrue, bool ControlsExit, 8014 bool AllowPredicates) { 8015 (void)this->L; 8016 (void)this->ExitIfTrue; 8017 (void)this->AllowPredicates; 8018 8019 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8020 this->AllowPredicates == AllowPredicates && 8021 "Variance in assumed invariant key components!"); 8022 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 8023 if (Itr == TripCountMap.end()) 8024 return None; 8025 return Itr->second; 8026 } 8027 8028 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 8029 bool ExitIfTrue, 8030 bool ControlsExit, 8031 bool AllowPredicates, 8032 const ExitLimit &EL) { 8033 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8034 this->AllowPredicates == AllowPredicates && 8035 "Variance in assumed invariant key components!"); 8036 8037 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 8038 assert(InsertResult.second && "Expected successful insertion!"); 8039 (void)InsertResult; 8040 (void)ExitIfTrue; 8041 } 8042 8043 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 8044 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8045 bool ControlsExit, bool AllowPredicates) { 8046 8047 if (auto MaybeEL = 8048 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8049 return *MaybeEL; 8050 8051 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 8052 ControlsExit, AllowPredicates); 8053 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 8054 return EL; 8055 } 8056 8057 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 8058 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8059 bool ControlsExit, bool AllowPredicates) { 8060 // Handle BinOp conditions (And, Or). 8061 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 8062 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8063 return *LimitFromBinOp; 8064 8065 // With an icmp, it may be feasible to compute an exact backedge-taken count. 8066 // Proceed to the next level to examine the icmp. 8067 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 8068 ExitLimit EL = 8069 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 8070 if (EL.hasFullInfo() || !AllowPredicates) 8071 return EL; 8072 8073 // Try again, but use SCEV predicates this time. 8074 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 8075 /*AllowPredicates=*/true); 8076 } 8077 8078 // Check for a constant condition. 
These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If we're exiting based on the overflow flag of an x.with.overflow
  // intrinsic with a constant step, we can form an equivalent icmp predicate
  // and figure out how many iterations will be taken before we exit.
  const WithOverflowInst *WO;
  const APInt *C;
  if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) &&
      match(WO->getRHS(), m_APInt(C))) {
    ConstantRange NWR =
        ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C,
                                             WO->getNoWrapKind());
    CmpInst::Predicate Pred;
    APInt NewRHSC, Offset;
    NWR.getEquivalentICmp(Pred, NewRHSC, Offset);
    if (!ExitIfTrue)
      Pred = ICmpInst::getInversePredicate(Pred);
    auto *LHS = getSCEV(WO->getLHS());
    if (Offset != 0)
      LHS = getAddExpr(LHS, getConstant(Offset));
    auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC),
                                       ControlsExit, AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return None;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or  Op0 Op1), exit, loop
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *MaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    // Both conditions must be the same for the loop to continue executing.
    // Choose the less conservative count.
    // If ExitCond is a short-circuit form (select), using
    // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
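    // For instance, with "select i1 %c0, i1 %c1, i1 false", once %c0 is false
    // the loop exits regardless of %c1, so %c1 may be poison on that
    // iteration without UB; folding it into a umin could then propagate
    // poison into the computed exit count.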
    // For detailed examples, see
    // test/Analysis/ScalarEvolution/exit-count-select.ll.
    bool PoisonSafe = isa<BinaryOperator>(ExitCond);
    if (!PoisonSafe)
      // Even if ExitCond is a select, we can safely derive BECount using both
      // EL0 and EL1 in these cases:
      //   (1) EL0.ExactNotTaken is non-zero;
      //   (2) EL1.ExactNotTaken is non-poison;
      //   (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
      //       it cannot be umin(0, ..)).
      // The PoisonSafe assignment below is simplified, and the assertion
      // after the BECount calculation fully guarantees condition (3).
      PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
                   isa<SCEVConstant>(EL1.ExactNotTaken);
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
      BECount =
          getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);

      // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
      // it should have been simplified to zero (see condition (3) above).
      assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
             BECount->isZero());
    }
    if (EL0.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL1.MaxNotTaken;
    else if (EL1.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL0.MaxNotTaken;
    else
      MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
  } else {
    // Both conditions must be the same at the same time for the loop to exit.
    // For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
  // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
  // not to.
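  // In that case, fall back to the tightest constant bound implied by
  // BECount's unsigned range: e.g. if BECount is (zext i8 %n to i32), its
  // unsigned range max gives a constant MaxBECount of 255.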
8199 if (isa<SCEVCouldNotCompute>(MaxBECount) && 8200 !isa<SCEVCouldNotCompute>(BECount)) 8201 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 8202 8203 return ExitLimit(BECount, MaxBECount, false, 8204 { &EL0.Predicates, &EL1.Predicates }); 8205 } 8206 8207 ScalarEvolution::ExitLimit 8208 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8209 ICmpInst *ExitCond, 8210 bool ExitIfTrue, 8211 bool ControlsExit, 8212 bool AllowPredicates) { 8213 // If the condition was exit on true, convert the condition to exit on false 8214 ICmpInst::Predicate Pred; 8215 if (!ExitIfTrue) 8216 Pred = ExitCond->getPredicate(); 8217 else 8218 Pred = ExitCond->getInversePredicate(); 8219 const ICmpInst::Predicate OriginalPred = Pred; 8220 8221 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 8222 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 8223 8224 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit, 8225 AllowPredicates); 8226 if (EL.hasAnyInfo()) return EL; 8227 8228 auto *ExhaustiveCount = 8229 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 8230 8231 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 8232 return ExhaustiveCount; 8233 8234 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 8235 ExitCond->getOperand(1), L, OriginalPred); 8236 } 8237 ScalarEvolution::ExitLimit 8238 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8239 ICmpInst::Predicate Pred, 8240 const SCEV *LHS, const SCEV *RHS, 8241 bool ControlsExit, 8242 bool AllowPredicates) { 8243 8244 // Try to evaluate any dependencies out of the loop. 8245 LHS = getSCEVAtScope(LHS, L); 8246 RHS = getSCEVAtScope(RHS, L); 8247 8248 // At this point, we would like to compute how many iterations of the 8249 // loop the predicate will return true for these inputs. 8250 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 8251 // If there is a loop-invariant, force it into the RHS. 8252 std::swap(LHS, RHS); 8253 Pred = ICmpInst::getSwappedPredicate(Pred); 8254 } 8255 8256 // Simplify the operands before analyzing them. 8257 (void)SimplifyICmpOperands(Pred, LHS, RHS); 8258 8259 // If we have a comparison of a chrec against a constant, try to use value 8260 // ranges to answer this query. 8261 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 8262 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 8263 if (AddRec->getLoop() == L) { 8264 // Form the constant range. 8265 ConstantRange CompRange = 8266 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 8267 8268 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 8269 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 8270 } 8271 8272 // If this loop must exit based on this condition (or execute undefined 8273 // behaviour), and we can prove the test sequence produced must repeat 8274 // the same values on self-wrap of the IV, then we can infer that IV 8275 // doesn't self wrap because if it did, we'd have an infinite (undefined) 8276 // loop. 8277 if (ControlsExit && isLoopInvariant(RHS, L) && loopHasNoAbnormalExits(L) && 8278 loopIsFiniteByAssumption(L)) { 8279 8280 // TODO: We can peel off any functions which are invertible *in L*. Loop 8281 // invariant terms are effectively constants for our purposes here. 
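    // Illustrative example: for an i8 IV with step 1 compared as
    // (zext i8 {0,+,1} to i32) != %N in a finite loop, peeling off the zext
    // lets us look at the addrec itself; since a power-of-two stride that
    // self-wraps would revisit the same values forever, the IV can be marked
    // as not self-wrapping (FlagNW) below.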
8282 auto *InnerLHS = LHS; 8283 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) 8284 InnerLHS = ZExt->getOperand(); 8285 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) { 8286 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 8287 if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() && 8288 StrideC && StrideC->getAPInt().isPowerOf2()) { 8289 auto Flags = AR->getNoWrapFlags(); 8290 Flags = setFlags(Flags, SCEV::FlagNW); 8291 SmallVector<const SCEV*> Operands{AR->operands()}; 8292 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 8293 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 8294 } 8295 } 8296 } 8297 8298 switch (Pred) { 8299 case ICmpInst::ICMP_NE: { // while (X != Y) 8300 // Convert to: while (X-Y != 0) 8301 if (LHS->getType()->isPointerTy()) { 8302 LHS = getLosslessPtrToIntExpr(LHS); 8303 if (isa<SCEVCouldNotCompute>(LHS)) 8304 return LHS; 8305 } 8306 if (RHS->getType()->isPointerTy()) { 8307 RHS = getLosslessPtrToIntExpr(RHS); 8308 if (isa<SCEVCouldNotCompute>(RHS)) 8309 return RHS; 8310 } 8311 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 8312 AllowPredicates); 8313 if (EL.hasAnyInfo()) return EL; 8314 break; 8315 } 8316 case ICmpInst::ICMP_EQ: { // while (X == Y) 8317 // Convert to: while (X-Y == 0) 8318 if (LHS->getType()->isPointerTy()) { 8319 LHS = getLosslessPtrToIntExpr(LHS); 8320 if (isa<SCEVCouldNotCompute>(LHS)) 8321 return LHS; 8322 } 8323 if (RHS->getType()->isPointerTy()) { 8324 RHS = getLosslessPtrToIntExpr(RHS); 8325 if (isa<SCEVCouldNotCompute>(RHS)) 8326 return RHS; 8327 } 8328 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 8329 if (EL.hasAnyInfo()) return EL; 8330 break; 8331 } 8332 case ICmpInst::ICMP_SLT: 8333 case ICmpInst::ICMP_ULT: { // while (X < Y) 8334 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 8335 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 8336 AllowPredicates); 8337 if (EL.hasAnyInfo()) return EL; 8338 break; 8339 } 8340 case ICmpInst::ICMP_SGT: 8341 case ICmpInst::ICMP_UGT: { // while (X > Y) 8342 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 8343 ExitLimit EL = 8344 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 8345 AllowPredicates); 8346 if (EL.hasAnyInfo()) return EL; 8347 break; 8348 } 8349 default: 8350 break; 8351 } 8352 8353 return getCouldNotCompute(); 8354 } 8355 8356 ScalarEvolution::ExitLimit 8357 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 8358 SwitchInst *Switch, 8359 BasicBlock *ExitingBlock, 8360 bool ControlsExit) { 8361 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 8362 8363 // Give up if the exit is the default dest of a switch. 
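  // The default destination is reached when the condition matches none of
  // the cases, which is not a single equality we can hand to howFarToZero
  // below.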
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either of the form %iv or %iv.shifted in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
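      // For example, if the compared value is %iv.shifted, this strips one
      // lshr/ashr/shl and leaves the PHI %iv for the recurrence check below.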
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}

/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}

/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
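  // PHIMap memoizes, for each visited instruction, the unique header PHI it
  // evolves from (or null), e.g. mapping "%x = mul i32 %iv, 3" back to %iv.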
8617 DenseMap<Instruction *, PHINode *> PHIMap; 8618 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0); 8619 } 8620 8621 /// EvaluateExpression - Given an expression that passes the 8622 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 8623 /// in the loop has the value PHIVal. If we can't fold this expression for some 8624 /// reason, return null. 8625 static Constant *EvaluateExpression(Value *V, const Loop *L, 8626 DenseMap<Instruction *, Constant *> &Vals, 8627 const DataLayout &DL, 8628 const TargetLibraryInfo *TLI) { 8629 // Convenient constant check, but redundant for recursive calls. 8630 if (Constant *C = dyn_cast<Constant>(V)) return C; 8631 Instruction *I = dyn_cast<Instruction>(V); 8632 if (!I) return nullptr; 8633 8634 if (Constant *C = Vals.lookup(I)) return C; 8635 8636 // An instruction inside the loop depends on a value outside the loop that we 8637 // weren't given a mapping for, or a value such as a call inside the loop. 8638 if (!canConstantEvolve(I, L)) return nullptr; 8639 8640 // An unmapped PHI can be due to a branch or another loop inside this loop, 8641 // or due to this not being the initial iteration through a loop where we 8642 // couldn't compute the evolution of this particular PHI last time. 8643 if (isa<PHINode>(I)) return nullptr; 8644 8645 std::vector<Constant*> Operands(I->getNumOperands()); 8646 8647 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 8648 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); 8649 if (!Operand) { 8650 Operands[i] = dyn_cast<Constant>(I->getOperand(i)); 8651 if (!Operands[i]) return nullptr; 8652 continue; 8653 } 8654 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI); 8655 Vals[Operand] = C; 8656 if (!C) return nullptr; 8657 Operands[i] = C; 8658 } 8659 8660 if (CmpInst *CI = dyn_cast<CmpInst>(I)) 8661 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 8662 Operands[1], DL, TLI); 8663 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 8664 if (!LI->isVolatile()) 8665 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); 8666 } 8667 return ConstantFoldInstOperands(I, Operands, DL, TLI); 8668 } 8669 8670 8671 // If every incoming value to PN except the one for BB is a specific Constant, 8672 // return that, else return nullptr. 8673 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 8674 Constant *IncomingVal = nullptr; 8675 8676 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 8677 if (PN->getIncomingBlock(i) == BB) 8678 continue; 8679 8680 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 8681 if (!CurrentVal) 8682 return nullptr; 8683 8684 if (IncomingVal != CurrentVal) { 8685 if (IncomingVal) 8686 return nullptr; 8687 IncomingVal = CurrentVal; 8688 } 8689 } 8690 8691 return IncomingVal; 8692 } 8693 8694 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 8695 /// in the header of its containing loop, we know the loop executes a 8696 /// constant number of times, and the PHI node is just a recurrence 8697 /// involving constants, fold it. 8698 Constant * 8699 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 8700 const APInt &BEs, 8701 const Loop *L) { 8702 auto I = ConstantEvolutionLoopExitValue.find(PN); 8703 if (I != ConstantEvolutionLoopExitValue.end()) 8704 return I->second; 8705 8706 if (BEs.ugt(MaxBruteForceIterations)) 8707 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 
8708 8709 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 8710 8711 DenseMap<Instruction *, Constant *> CurrentIterVals; 8712 BasicBlock *Header = L->getHeader(); 8713 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8714 8715 BasicBlock *Latch = L->getLoopLatch(); 8716 if (!Latch) 8717 return nullptr; 8718 8719 for (PHINode &PHI : Header->phis()) { 8720 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8721 CurrentIterVals[&PHI] = StartCST; 8722 } 8723 if (!CurrentIterVals.count(PN)) 8724 return RetVal = nullptr; 8725 8726 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8727 8728 // Execute the loop symbolically to determine the exit value. 8729 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8730 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8731 8732 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8733 unsigned IterationNum = 0; 8734 const DataLayout &DL = getDataLayout(); 8735 for (; ; ++IterationNum) { 8736 if (IterationNum == NumIterations) 8737 return RetVal = CurrentIterVals[PN]; // Got exit value! 8738 8739 // Compute the value of the PHIs for the next iteration. 8740 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 8741 DenseMap<Instruction *, Constant *> NextIterVals; 8742 Constant *NextPHI = 8743 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8744 if (!NextPHI) 8745 return nullptr; // Couldn't evaluate! 8746 NextIterVals[PN] = NextPHI; 8747 8748 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 8749 8750 // Also evaluate the other PHI nodes. However, we don't get to stop if we 8751 // cease to be able to evaluate one of them or if they stop evolving, 8752 // because that doesn't necessarily prevent us from computing PN. 8753 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 8754 for (const auto &I : CurrentIterVals) { 8755 PHINode *PHI = dyn_cast<PHINode>(I.first); 8756 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 8757 PHIsToCompute.emplace_back(PHI, I.second); 8758 } 8759 // We use two distinct loops because EvaluateExpression may invalidate any 8760 // iterators into CurrentIterVals. 8761 for (const auto &I : PHIsToCompute) { 8762 PHINode *PHI = I.first; 8763 Constant *&NextPHI = NextIterVals[PHI]; 8764 if (!NextPHI) { // Not already computed. 8765 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8766 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8767 } 8768 if (NextPHI != I.second) 8769 StoppedEvolving = false; 8770 } 8771 8772 // If all entries in CurrentIterVals == NextIterVals then we can stop 8773 // iterating, the loop can't continue to change. 8774 if (StoppedEvolving) 8775 return RetVal = CurrentIterVals[PN]; 8776 8777 CurrentIterVals.swap(NextIterVals); 8778 } 8779 } 8780 8781 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L, 8782 Value *Cond, 8783 bool ExitWhen) { 8784 PHINode *PN = getConstantEvolvingPHI(Cond, L); 8785 if (!PN) return getCouldNotCompute(); 8786 8787 // If the loop is canonicalized, the PHI will have exactly two entries. 8788 // That's the only form we support here. 
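  // That is, one incoming value from the preheader and one from the latch.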
8789 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 8790 8791 DenseMap<Instruction *, Constant *> CurrentIterVals; 8792 BasicBlock *Header = L->getHeader(); 8793 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8794 8795 BasicBlock *Latch = L->getLoopLatch(); 8796 assert(Latch && "Should follow from NumIncomingValues == 2!"); 8797 8798 for (PHINode &PHI : Header->phis()) { 8799 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8800 CurrentIterVals[&PHI] = StartCST; 8801 } 8802 if (!CurrentIterVals.count(PN)) 8803 return getCouldNotCompute(); 8804 8805 // Okay, we find a PHI node that defines the trip count of this loop. Execute 8806 // the loop symbolically to determine when the condition gets a value of 8807 // "ExitWhen". 8808 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 8809 const DataLayout &DL = getDataLayout(); 8810 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ 8811 auto *CondVal = dyn_cast_or_null<ConstantInt>( 8812 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); 8813 8814 // Couldn't symbolically evaluate. 8815 if (!CondVal) return getCouldNotCompute(); 8816 8817 if (CondVal->getValue() == uint64_t(ExitWhen)) { 8818 ++NumBruteForceTripCountsComputed; 8819 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 8820 } 8821 8822 // Update all the PHI nodes for the next iteration. 8823 DenseMap<Instruction *, Constant *> NextIterVals; 8824 8825 // Create a list of which PHIs we need to compute. We want to do this before 8826 // calling EvaluateExpression on them because that may invalidate iterators 8827 // into CurrentIterVals. 8828 SmallVector<PHINode *, 8> PHIsToCompute; 8829 for (const auto &I : CurrentIterVals) { 8830 PHINode *PHI = dyn_cast<PHINode>(I.first); 8831 if (!PHI || PHI->getParent() != Header) continue; 8832 PHIsToCompute.push_back(PHI); 8833 } 8834 for (PHINode *PHI : PHIsToCompute) { 8835 Constant *&NextPHI = NextIterVals[PHI]; 8836 if (NextPHI) continue; // Already computed! 8837 8838 Value *BEValue = PHI->getIncomingValueForBlock(Latch); 8839 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8840 } 8841 CurrentIterVals.swap(NextIterVals); 8842 } 8843 8844 // Too many iterations were needed to evaluate. 8845 return getCouldNotCompute(); 8846 } 8847 8848 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8849 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8850 ValuesAtScopes[V]; 8851 // Check to see if we've folded this expression at this loop before. 8852 for (auto &LS : Values) 8853 if (LS.first == L) 8854 return LS.second ? LS.second : V; 8855 8856 Values.emplace_back(L, nullptr); 8857 8858 // Otherwise compute it. 8859 const SCEV *C = computeSCEVAtScope(V, L); 8860 for (auto &LS : reverse(ValuesAtScopes[V])) 8861 if (LS.first == L) { 8862 LS.second = C; 8863 if (!isa<SCEVConstant>(C)) 8864 ValuesAtScopesUsers[C].push_back({L, V}); 8865 break; 8866 } 8867 return C; 8868 } 8869 8870 /// This builds up a Constant using the ConstantExpr interface. That way, we 8871 /// will return Constants for objects which aren't represented by a 8872 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8873 /// Returns NULL if the SCEV isn't representable as a Constant. 
8874 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8875 switch (V->getSCEVType()) { 8876 case scCouldNotCompute: 8877 case scAddRecExpr: 8878 return nullptr; 8879 case scConstant: 8880 return cast<SCEVConstant>(V)->getValue(); 8881 case scUnknown: 8882 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8883 case scSignExtend: { 8884 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8885 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8886 return ConstantExpr::getSExt(CastOp, SS->getType()); 8887 return nullptr; 8888 } 8889 case scZeroExtend: { 8890 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8891 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8892 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8893 return nullptr; 8894 } 8895 case scPtrToInt: { 8896 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 8897 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 8898 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 8899 8900 return nullptr; 8901 } 8902 case scTruncate: { 8903 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8904 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8905 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8906 return nullptr; 8907 } 8908 case scAddExpr: { 8909 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8910 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8911 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8912 unsigned AS = PTy->getAddressSpace(); 8913 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8914 C = ConstantExpr::getBitCast(C, DestPtrTy); 8915 } 8916 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8917 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8918 if (!C2) 8919 return nullptr; 8920 8921 // First pointer! 8922 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8923 unsigned AS = C2->getType()->getPointerAddressSpace(); 8924 std::swap(C, C2); 8925 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8926 // The offsets have been converted to bytes. We can add bytes to an 8927 // i8* by GEP with the byte count in the first index. 8928 C = ConstantExpr::getBitCast(C, DestPtrTy); 8929 } 8930 8931 // Don't bother trying to sum two pointers. We probably can't 8932 // statically compute a load that results from it anyway. 8933 if (C2->getType()->isPointerTy()) 8934 return nullptr; 8935 8936 if (C->getType()->isPointerTy()) { 8937 C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()), 8938 C, C2); 8939 } else { 8940 C = ConstantExpr::getAdd(C, C2); 8941 } 8942 } 8943 return C; 8944 } 8945 return nullptr; 8946 } 8947 case scMulExpr: { 8948 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); 8949 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { 8950 // Don't bother with pointers at all. 
8951 if (C->getType()->isPointerTy()) 8952 return nullptr; 8953 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { 8954 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); 8955 if (!C2 || C2->getType()->isPointerTy()) 8956 return nullptr; 8957 C = ConstantExpr::getMul(C, C2); 8958 } 8959 return C; 8960 } 8961 return nullptr; 8962 } 8963 case scUDivExpr: { 8964 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); 8965 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) 8966 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) 8967 if (LHS->getType() == RHS->getType()) 8968 return ConstantExpr::getUDiv(LHS, RHS); 8969 return nullptr; 8970 } 8971 case scSMaxExpr: 8972 case scUMaxExpr: 8973 case scSMinExpr: 8974 case scUMinExpr: 8975 return nullptr; // TODO: smax, umax, smin, umin. 8976 } 8977 llvm_unreachable("Unknown SCEV kind!"); 8978 } 8979 8980 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 8981 if (isa<SCEVConstant>(V)) return V; 8982 8983 // If this instruction is evolved from a constant-evolving PHI, compute the 8984 // exit value from the loop without using SCEVs. 8985 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 8986 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 8987 if (PHINode *PN = dyn_cast<PHINode>(I)) { 8988 const Loop *CurrLoop = this->LI[I->getParent()]; 8989 // Looking for loop exit value. 8990 if (CurrLoop && CurrLoop->getParentLoop() == L && 8991 PN->getParent() == CurrLoop->getHeader()) { 8992 // Okay, there is no closed-form solution for the PHI node. Check 8993 // to see if the loop that contains it has a known backedge-taken 8994 // count. If so, we may be able to force computation of the exit 8995 // value. 8996 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop); 8997 // This trivial case can show up in some degenerate cases where 8998 // the incoming IR has not yet been fully simplified. 8999 if (BackedgeTakenCount->isZero()) { 9000 Value *InitValue = nullptr; 9001 bool MultipleInitValues = false; 9002 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) { 9003 if (!CurrLoop->contains(PN->getIncomingBlock(i))) { 9004 if (!InitValue) 9005 InitValue = PN->getIncomingValue(i); 9006 else if (InitValue != PN->getIncomingValue(i)) { 9007 MultipleInitValues = true; 9008 break; 9009 } 9010 } 9011 } 9012 if (!MultipleInitValues && InitValue) 9013 return getSCEV(InitValue); 9014 } 9015 // Do we have a loop invariant value flowing around the backedge 9016 // for a loop which must execute the backedge? 9017 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 9018 isKnownPositive(BackedgeTakenCount) && 9019 PN->getNumIncomingValues() == 2) { 9020 9021 unsigned InLoopPred = 9022 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1; 9023 Value *BackedgeVal = PN->getIncomingValue(InLoopPred); 9024 if (CurrLoop->isLoopInvariant(BackedgeVal)) 9025 return getSCEV(BackedgeVal); 9026 } 9027 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 9028 // Okay, we know how many times the containing loop executes. If 9029 // this is a constant-evolving PHI node, get the final value at 9030 // the specified iteration number. 9031 Constant *RV = getConstantEvolutionLoopExitValue( 9032 PN, BTCC->getAPInt(), CurrLoop); 9033 if (RV) return getSCEV(RV); 9034 } 9035 } 9036 9037 // If there is a single-input Phi, evaluate it at our scope. If we can 9038 // prove that this replacement does not break LCSSA form, use the new value.
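// (Such single-input PHIs typically arise as LCSSA phis in loop exit
// blocks, e.g. "%lcssa = phi i32 [ %v, %loop ]"; replacing one with a
// constant trivially preserves LCSSA form, since constants never need
// LCSSA phis.)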
9039 if (PN->getNumOperands() == 1) { 9040 const SCEV *Input = getSCEV(PN->getOperand(0)); 9041 const SCEV *InputAtScope = getSCEVAtScope(Input, L); 9042 // TODO: We can generalize this using LI.replacementPreservesLCSSAForm; 9043 // for now, as the simplest case, we only support constants. 9044 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope; 9045 } 9046 } 9047 9048 // Okay, this is an expression that we cannot symbolically evaluate 9049 // into a SCEV. Check to see if it's possible to symbolically evaluate 9050 // the arguments into constants, and if so, try to constant propagate the 9051 // result. This is particularly useful for computing loop exit values. 9052 if (CanConstantFold(I)) { 9053 SmallVector<Constant *, 4> Operands; 9054 bool MadeImprovement = false; 9055 for (Value *Op : I->operands()) { 9056 if (Constant *C = dyn_cast<Constant>(Op)) { 9057 Operands.push_back(C); 9058 continue; 9059 } 9060 9061 // If any of the operands is non-constant and of a non-integer, 9062 // non-pointer type, don't even try to analyze it with SCEV 9063 // techniques. 9064 if (!isSCEVable(Op->getType())) 9065 return V; 9066 9067 const SCEV *OrigV = getSCEV(Op); 9068 const SCEV *OpV = getSCEVAtScope(OrigV, L); 9069 MadeImprovement |= OrigV != OpV; 9070 9071 Constant *C = BuildConstantFromSCEV(OpV); 9072 if (!C) return V; 9073 if (C->getType() != Op->getType()) 9074 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 9075 Op->getType(), 9076 false), 9077 C, Op->getType()); 9078 Operands.push_back(C); 9079 } 9080 9081 // Check to see if getSCEVAtScope actually made an improvement. 9082 if (MadeImprovement) { 9083 Constant *C = nullptr; 9084 const DataLayout &DL = getDataLayout(); 9085 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 9086 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 9087 Operands[1], DL, &TLI); 9088 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) { 9089 if (!Load->isVolatile()) 9090 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(), 9091 DL); 9092 } else 9093 C = ConstantFoldInstOperands(I, Operands, DL, &TLI); 9094 if (!C) return V; 9095 return getSCEV(C); 9096 } 9097 } 9098 } 9099 9100 // This is some other type of SCEVUnknown; just return it. 9101 return V; 9102 } 9103 9104 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 9105 // Avoid performing the look-up in the common case where the specified 9106 // expression has no loop-variant portions. 9107 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 9108 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 9109 if (OpAtScope != Comm->getOperand(i)) { 9110 // Okay, at least one of these operands is loop variant but might be 9111 // foldable. Build a new instance of the folded commutative expression. 9112 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 9113 Comm->op_begin()+i); 9114 NewOps.push_back(OpAtScope); 9115 9116 for (++i; i != e; ++i) { 9117 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 9118 NewOps.push_back(OpAtScope); 9119 } 9120 if (isa<SCEVAddExpr>(Comm)) 9121 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 9122 if (isa<SCEVMulExpr>(Comm)) 9123 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 9124 if (isa<SCEVMinMaxExpr>(Comm)) 9125 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 9126 llvm_unreachable("Unknown commutative SCEV type!"); 9127 } 9128 } 9129 // If we got here, all operands are loop invariant.
9130 return Comm; 9131 } 9132 9133 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 9134 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 9135 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 9136 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 9137 return Div; // must be loop invariant 9138 return getUDivExpr(LHS, RHS); 9139 } 9140 9141 // If this is a loop recurrence for a loop that does not contain L, then we 9142 // are dealing with the final value computed by the loop. 9143 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 9144 // First, attempt to evaluate each operand. 9145 // Avoid performing the look-up in the common case where the specified 9146 // expression has no loop-variant portions. 9147 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 9148 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 9149 if (OpAtScope == AddRec->getOperand(i)) 9150 continue; 9151 9152 // Okay, at least one of these operands is loop variant but might be 9153 // foldable. Build a new instance of the folded addrec expression. 9154 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 9155 AddRec->op_begin()+i); 9156 NewOps.push_back(OpAtScope); 9157 for (++i; i != e; ++i) 9158 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 9159 9160 const SCEV *FoldedRec = 9161 getAddRecExpr(NewOps, AddRec->getLoop(), 9162 AddRec->getNoWrapFlags(SCEV::FlagNW)); 9163 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 9164 // The addrec may be folded to a nonrecurrence, for example, if the 9165 // induction variable is multiplied by zero after constant folding. Go 9166 // ahead and return the folded value. 9167 if (!AddRec) 9168 return FoldedRec; 9169 break; 9170 } 9171 9172 // If the scope is outside the addrec's loop, evaluate it by using the 9173 // loop exit value of the addrec. 9174 if (!AddRec->getLoop()->contains(L)) { 9175 // To evaluate this recurrence, we need to know how many times the AddRec 9176 // loop iterates. Compute this now. 9177 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 9178 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 9179 9180 // Then, evaluate the AddRec.
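// E.g. (illustrative) for the affine addrec {5,+,3}<%L> and a
// backedge-taken count of 7, this evaluates to 5 + 3*7 = 26.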
9181 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 9182 } 9183 9184 return AddRec; 9185 } 9186 9187 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 9188 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9189 if (Op == Cast->getOperand()) 9190 return Cast; // must be loop invariant 9191 return getZeroExtendExpr(Op, Cast->getType()); 9192 } 9193 9194 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 9195 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9196 if (Op == Cast->getOperand()) 9197 return Cast; // must be loop invariant 9198 return getSignExtendExpr(Op, Cast->getType()); 9199 } 9200 9201 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 9202 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9203 if (Op == Cast->getOperand()) 9204 return Cast; // must be loop invariant 9205 return getTruncateExpr(Op, Cast->getType()); 9206 } 9207 9208 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { 9209 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 9210 if (Op == Cast->getOperand()) 9211 return Cast; // must be loop invariant 9212 return getPtrToIntExpr(Op, Cast->getType()); 9213 } 9214 9215 llvm_unreachable("Unknown SCEV type!"); 9216 } 9217 9218 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 9219 return getSCEVAtScope(getSCEV(V), L); 9220 } 9221 9222 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 9223 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 9224 return stripInjectiveFunctions(ZExt->getOperand()); 9225 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 9226 return stripInjectiveFunctions(SExt->getOperand()); 9227 return S; 9228 } 9229 9230 /// Finds the minimum unsigned root of the following equation: 9231 /// 9232 /// A * X = B (mod N) 9233 /// 9234 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 9235 /// A and B isn't important. 9236 /// 9237 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 9238 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 9239 ScalarEvolution &SE) { 9240 uint32_t BW = A.getBitWidth(); 9241 assert(BW == SE.getTypeSizeInBits(B->getType())); 9242 assert(A != 0 && "A must be non-zero."); 9243 9244 // 1. D = gcd(A, N) 9245 // 9246 // The gcd of A and N may have only one prime factor: 2. The number of 9247 // trailing zeros in A is its multiplicity 9248 uint32_t Mult2 = A.countTrailingZeros(); 9249 // D = 2^Mult2 9250 9251 // 2. Check if B is divisible by D. 9252 // 9253 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 9254 // is not less than multiplicity of this prime factor for D. 9255 if (SE.GetMinTrailingZeros(B) < Mult2) 9256 return SE.getCouldNotCompute(); 9257 9258 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 9259 // modulo (N / D). 9260 // 9261 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 9262 // (N / D) in general. The inverse itself always fits into BW bits, though, 9263 // so we immediately truncate it. 9264 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 9265 APInt Mod(BW + 1, 0); 9266 Mod.setBit(BW - Mult2); // Mod = N / D 9267 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 9268 9269 // 4. 
Compute the minimum unsigned root of the equation: 9270 // I * (B / D) mod (N / D) 9271 // To simplify the computation, we factor out the divide by D: 9272 // (I * B mod N) / D 9273 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 9274 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 9275 } 9276 9277 /// For a given quadratic addrec, generate coefficients of the corresponding 9278 /// quadratic equation, multiplied by a common value to ensure that they are 9279 /// integers. 9280 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 9281 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 9282 /// were multiplied by, and BitWidth is the bit width of the original addrec 9283 /// coefficients. 9284 /// This function returns None if the addrec coefficients are not compile- 9285 /// time constants. 9286 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 9287 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 9288 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 9289 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 9290 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 9291 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 9292 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 9293 << *AddRec << '\n'); 9294 9295 // We currently can only solve this if the coefficients are constants. 9296 if (!LC || !MC || !NC) { 9297 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 9298 return None; 9299 } 9300 9301 APInt L = LC->getAPInt(); 9302 APInt M = MC->getAPInt(); 9303 APInt N = NC->getAPInt(); 9304 assert(!N.isZero() && "This is not a quadratic addrec"); 9305 9306 unsigned BitWidth = LC->getAPInt().getBitWidth(); 9307 unsigned NewWidth = BitWidth + 1; 9308 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 9309 << BitWidth << '\n'); 9310 // The sign-extension (as opposed to a zero-extension) here matches the 9311 // extension used in SolveQuadraticEquationWrap (with the same motivation). 9312 N = N.sext(NewWidth); 9313 M = M.sext(NewWidth); 9314 L = L.sext(NewWidth); 9315 9316 // The increments are M, M+N, M+2N, ..., so the accumulated values are 9317 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 9318 // L+M, L+2M+N, L+3M+3N, ... 9319 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 9320 // 9321 // The equation Acc = 0 is then 9322 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 9323 // In a quadratic form it becomes: 9324 // N n^2 + (2M-N) n + 2L = 0. 9325 9326 APInt A = N; 9327 APInt B = 2 * M - A; 9328 APInt C = 2 * L; 9329 APInt T = APInt(NewWidth, 2); 9330 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 9331 << "x + " << C << ", coeff bw: " << NewWidth 9332 << ", multiplied by " << T << '\n'); 9333 return std::make_tuple(A, B, C, T, BitWidth); 9334 } 9335 9336 /// Helper function to compare optional APInts: 9337 /// (a) if X and Y both exist, return min(X, Y), 9338 /// (b) if neither X nor Y exist, return None, 9339 /// (c) if exactly one of X and Y exists, return that value. 9340 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 9341 if (X.hasValue() && Y.hasValue()) { 9342 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 9343 APInt XW = X->sextOrSelf(W); 9344 APInt YW = Y->sextOrSelf(W); 9345 return XW.slt(YW) ? 
*X : *Y; 9346 } 9347 if (!X.hasValue() && !Y.hasValue()) 9348 return None; 9349 return X.hasValue() ? *X : *Y; 9350 } 9351 9352 /// Helper function to truncate an optional APInt to a given BitWidth. 9353 /// When solving addrec-related equations, it is preferable to return a value 9354 /// that has the same bit width as the original addrec's coefficients. If the 9355 /// solution fits in the original bit width, truncate it (except for i1). 9356 /// Returning a value of a different bit width may inhibit some optimizations. 9357 /// 9358 /// In general, a solution to a quadratic equation generated from an addrec 9359 /// may require BW+1 bits, where BW is the bit width of the addrec's 9360 /// coefficients. The reason is that the coefficients of the quadratic 9361 /// equation are BW+1 bits wide (to avoid truncation when converting from 9362 /// the addrec to the equation). 9363 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 9364 if (!X.hasValue()) 9365 return None; 9366 unsigned W = X->getBitWidth(); 9367 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 9368 return X->trunc(BitWidth); 9369 return X; 9370 } 9371 9372 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 9373 /// iterations. The values L, M, N are assumed to be signed, and they 9374 /// should all have the same bit widths. 9375 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 9376 /// where BW is the bit width of the addrec's coefficients. 9377 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 9378 /// returned as such, otherwise the bit width of the returned value may 9379 /// be greater than BW. 9380 /// 9381 /// This function returns None if 9382 /// (a) the addrec coefficients are not constant, or 9383 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 9384 /// like x^2 = 5, no integer solutions exist, in other cases an integer 9385 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 9386 static Optional<APInt> 9387 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 9388 APInt A, B, C, M; 9389 unsigned BitWidth; 9390 auto T = GetQuadraticEquation(AddRec); 9391 if (!T.hasValue()) 9392 return None; 9393 9394 std::tie(A, B, C, M, BitWidth) = *T; 9395 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 9396 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 9397 if (!X.hasValue()) 9398 return None; 9399 9400 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 9401 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 9402 if (!V->isZero()) 9403 return None; 9404 9405 return TruncIfPossible(X, BitWidth); 9406 } 9407 9408 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 9409 /// iterations. The values M, N are assumed to be signed, and they 9410 /// should all have the same bit widths. 9411 /// Find the least n such that c(n) does not belong to the given range, 9412 /// while c(n-1) does. 9413 /// 9414 /// This function returns None if 9415 /// (a) the addrec coefficients are not constant, or 9416 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 9417 /// bounds of the range. 
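/// For example (illustrative): the chrec {0,+,1,+,1} takes the values
/// 0, 1, 3, 6, 10, ...; with Range = [0, 7) the answer would be n = 4,
/// since c(4) = 10 is the first value outside the range while c(3) = 6
/// is still inside it.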
9418 static Optional<APInt> 9419 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 9420 const ConstantRange &Range, ScalarEvolution &SE) { 9421 assert(AddRec->getOperand(0)->isZero() && 9422 "Starting value of addrec should be 0"); 9423 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 9424 << Range << ", addrec " << *AddRec << '\n'); 9425 // This case is handled in getNumIterationsInRange. Here we can assume that 9426 // we start in the range. 9427 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 9428 "Addrec's initial value should be in range"); 9429 9430 APInt A, B, C, M; 9431 unsigned BitWidth; 9432 auto T = GetQuadraticEquation(AddRec); 9433 if (!T.hasValue()) 9434 return None; 9435 9436 // Be careful about the return value: there can be two reasons for not 9437 // returning an actual number. First, if no solutions to the equations 9438 // were found, and second, if the solutions don't leave the given range. 9439 // The first case means that the actual solution is "unknown", the second 9440 // means that it's known, but not valid. If the solution is unknown, we 9441 // cannot make any conclusions. 9442 // Return a pair: the optional solution and a flag indicating if the 9443 // solution was found. 9444 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 9445 // Solve for signed overflow and unsigned overflow, pick the lower 9446 // solution. 9447 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 9448 << Bound << " (before multiplying by " << M << ")\n"); 9449 Bound *= M; // The quadratic equation multiplier. 9450 9451 Optional<APInt> SO = None; 9452 if (BitWidth > 1) { 9453 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9454 "signed overflow\n"); 9455 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 9456 } 9457 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9458 "unsigned overflow\n"); 9459 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 9460 BitWidth+1); 9461 9462 auto LeavesRange = [&] (const APInt &X) { 9463 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 9464 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 9465 if (Range.contains(V0->getValue())) 9466 return false; 9467 // X should be at least 1, so X-1 is non-negative. 9468 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 9469 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 9470 if (Range.contains(V1->getValue())) 9471 return true; 9472 return false; 9473 }; 9474 9475 // If SolveQuadraticEquationWrap returns None, it means that there can 9476 // be a solution, but the function failed to find it. We cannot treat it 9477 // as "no solution". 9478 if (!SO.hasValue() || !UO.hasValue()) 9479 return { None, false }; 9480 9481 // Check the smaller value first to see if it leaves the range. 9482 // At this point, both SO and UO must have values. 9483 Optional<APInt> Min = MinOptional(SO, UO); 9484 if (LeavesRange(*Min)) 9485 return { Min, true }; 9486 Optional<APInt> Max = Min == SO ? UO : SO; 9487 if (LeavesRange(*Max)) 9488 return { Max, true }; 9489 9490 // Solutions were found, but were eliminated, hence the "true". 9491 return { None, true }; 9492 }; 9493 9494 std::tie(A, B, C, M, BitWidth) = *T; 9495 // Lower bound is inclusive, subtract 1 to represent the exiting value. 
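// E.g. (illustrative) for Range = [3, 10) the addrec leaves the range
// either by reaching 2 (= 3 - 1) going down or 10 going up; these are the
// two boundaries solved for below.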
9496 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1; 9497 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth()); 9498 auto SL = SolveForBoundary(Lower); 9499 auto SU = SolveForBoundary(Upper); 9500 // If any of the solutions was unknown, no meaningful conclusions can 9501 // be made. 9502 if (!SL.second || !SU.second) 9503 return None; 9504 9505 // Claim: The correct solution is not some value between Min and Max. 9506 // 9507 // Justification: Assuming that Min and Max are different values, one of 9508 // them corresponds to the first signed overflow, and the other to the 9509 // first unsigned overflow. Crossing the range boundary is only 9510 // possible via an overflow (treating 0 as a special case of it, modeling 9511 // an overflow as crossing k*2^W for some k). 9512 // 9513 // The interesting case here is when Min was eliminated as an invalid 9514 // solution, but Max was not. The argument is that if there was another 9515 // overflow between Min and Max, it would also have been eliminated if 9516 // it was considered. 9517 // 9518 // For a given boundary, it is possible to have two overflows of the same 9519 // type (signed/unsigned) without having the other type in between: this 9520 // can happen when the vertex of the parabola is between the iterations 9521 // corresponding to the overflows. This is only possible when the two 9522 // overflows cross k*2^W for the same k. In such a case, if the second one 9523 // left the range (and was the first one to do so), the first overflow 9524 // would have to enter the range, which would mean that either we had left 9525 // the range before or that we started outside of it. Both of these cases 9526 // are contradictions. 9527 // 9528 // Claim: In the case where SolveForBoundary returns None, the correct 9529 // solution is not some value between the Max for this boundary and the 9530 // Min of the other boundary. 9531 // 9532 // Justification: Assume that we had such Max_A and Min_B corresponding 9533 // to range boundaries A and B and such that Max_A < Min_B. If there was 9534 // a solution between Max_A and Min_B, it would have to be caused by an 9535 // overflow corresponding to either A or B. It cannot correspond to B, 9536 // since Min_B is the first occurrence of such an overflow. If it 9537 // corresponded to A, it would have to be either a signed or an unsigned 9538 // overflow that is larger than both eliminated overflows for A. But 9539 // between the eliminated overflows and this overflow, the values would 9540 // cover the entire value space, thus crossing the other boundary, which 9541 // is a contradiction. 9542 9543 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth); 9544 } 9545 9546 ScalarEvolution::ExitLimit 9547 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit, 9548 bool AllowPredicates) { 9549 9550 // This is only used for loops with an "x != y" exit test. The exit condition 9551 // is now expressed as a single expression, V = x-y. So the exit test is 9552 // effectively V != 0. We know and take advantage of the fact that this 9553 // expression is only used in a comparison-with-zero context. 9554 9555 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 9556 // If the value is a constant: 9557 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9558 // If the value is already zero, the branch will execute zero times. 9559 if (C->getValue()->isZero()) return C; 9560 return getCouldNotCompute(); // Otherwise it will loop infinitely.
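// (E.g. if V = x - y folds to the constant 3, the "x != y" exit test can
// never fail, so no finite trip count exists.)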
9561 } 9562 9563 const SCEVAddRecExpr *AddRec = 9564 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V)); 9565 9566 if (!AddRec && AllowPredicates) 9567 // Try to make this an AddRec using runtime tests, in the first X 9568 // iterations of this loop, where X is the SCEV expression found by the 9569 // algorithm below. 9570 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates); 9571 9572 if (!AddRec || AddRec->getLoop() != L) 9573 return getCouldNotCompute(); 9574 9575 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 9576 // the quadratic equation to solve it. 9577 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 9578 // We can only use this value if the chrec ends up with an exact zero 9579 // value at this index. When solving for "X*X != 5", for example, we 9580 // should not accept a root of 2. 9581 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) { 9582 const auto *R = cast<SCEVConstant>(getConstant(S.getValue())); 9583 return ExitLimit(R, R, false, Predicates); 9584 } 9585 return getCouldNotCompute(); 9586 } 9587 9588 // Otherwise we can only handle this if it is affine. 9589 if (!AddRec->isAffine()) 9590 return getCouldNotCompute(); 9591 9592 // If this is an affine expression, the execution count of this branch is 9593 // the minimum unsigned root of the following equation: 9594 // 9595 // Start + Step*N = 0 (mod 2^BW) 9596 // 9597 // equivalent to: 9598 // 9599 // Step*N = -Start (mod 2^BW) 9600 // 9601 // where BW is the common bit width of Start and Step. 9602 9603 // Get the initial value for the loop. 9604 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop()); 9605 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop()); 9606 9607 // For now we handle only constant steps. 9608 // 9609 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the 9610 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap 9611 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. 9612 // We have not yet seen any such cases. 9613 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); 9614 if (!StepC || StepC->getValue()->isZero()) 9615 return getCouldNotCompute(); 9616 9617 // For positive steps (counting up until unsigned overflow): 9618 // N = -Start/Step (as unsigned) 9619 // For negative steps (counting down to zero): 9620 // N = Start/-Step 9621 // First compute the unsigned distance from zero in the direction of Step. 9622 bool CountDown = StepC->getAPInt().isNegative(); 9623 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start); 9624 9625 // Handle unitary steps, which cannot wraparound. 9626 // 1*N = -Start; -1*N = Start (mod 2^BW), so: 9627 // N = Distance (as unsigned) 9628 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) { 9629 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L)); 9630 MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance)); 9631 9632 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated, 9633 // we end up with a loop whose backedge-taken count is n - 1. Detect this 9634 // case, and see if we can improve the bound. 9635 // 9636 // Explicitly handling this here is necessary because getUnsignedRange 9637 // isn't context-sensitive; it doesn't know that we only care about the 9638 // range inside the loop. 
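// E.g. (illustrative) a rotated "for (i = 0; i != n; ++i)" is guarded by
// "n != 0" and has Distance = n - 1; since Distance + 1 = n is known
// nonzero, the max backedge-taken count improves to unsigned_max(n) - 1.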
9639 const SCEV *Zero = getZero(Distance->getType()); 9640 const SCEV *One = getOne(Distance->getType()); 9641 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 9642 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 9643 // If Distance + 1 doesn't overflow, we can compute the maximum distance 9644 // as "unsigned_max(Distance + 1) - 1". 9645 ConstantRange CR = getUnsignedRange(DistancePlusOne); 9646 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 9647 } 9648 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 9649 } 9650 9651 // If the condition controls loop exit (the loop exits only if the expression 9652 // is true) and the addition is no-wrap we can use unsigned divide to 9653 // compute the backedge count. In this case, the step may not divide the 9654 // distance, but we don't care because if the condition is "missed" the loop 9655 // will have undefined behavior due to wrapping. 9656 if (ControlsExit && AddRec->hasNoSelfWrap() && 9657 loopHasNoAbnormalExits(AddRec->getLoop())) { 9658 const SCEV *Exact = 9659 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 9660 const SCEV *Max = getCouldNotCompute(); 9661 if (Exact != getCouldNotCompute()) { 9662 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L)); 9663 Max = getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact))); 9664 } 9665 return ExitLimit(Exact, Max, false, Predicates); 9666 } 9667 9668 // Solve the general equation. 9669 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 9670 getNegativeSCEV(Start), *this); 9671 9672 const SCEV *M = E; 9673 if (E != getCouldNotCompute()) { 9674 APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L)); 9675 M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E))); 9676 } 9677 return ExitLimit(E, M, false, Predicates); 9678 } 9679 9680 ScalarEvolution::ExitLimit 9681 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 9682 // Loops that look like: while (X == 0) are very strange indeed. We don't 9683 // handle them yet except for the trivial case. This could be expanded in the 9684 // future as needed. 9685 9686 // If the value is a constant, check to see if it is known to be non-zero 9687 // already. If so, the backedge will execute zero times. 9688 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9689 if (!C->getValue()->isZero()) 9690 return getZero(C->getType()); 9691 return getCouldNotCompute(); // Otherwise it will loop infinitely. 9692 } 9693 9694 // We could implement others, but I really doubt anyone writes loops like 9695 // this, and if they did, they would already be constant folded. 9696 return getCouldNotCompute(); 9697 } 9698 9699 std::pair<const BasicBlock *, const BasicBlock *> 9700 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 9701 const { 9702 // If the block has a unique predecessor, then there is no path from the 9703 // predecessor to the block that does not go through the direct edge 9704 // from the predecessor to the block. 9705 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 9706 return {Pred, BB}; 9707 9708 // A loop's header is defined to be a block that dominates the loop. 9709 // If the header has a unique predecessor outside the loop, it must be 9710 // a block that has exactly one successor that can reach the loop. 
9711 if (const Loop *L = LI.getLoopFor(BB)) 9712 return {L->getLoopPredecessor(), L->getHeader()}; 9713 9714 return {nullptr, nullptr}; 9715 } 9716 9717 /// SCEV structural equivalence is usually sufficient for testing whether two 9718 /// expressions are equal. However, for the purposes of looking for a condition 9719 /// guarding a loop, it can be useful to be a little more general, since a 9720 /// front-end may have replicated the controlling expression. 9721 static bool HasSameValue(const SCEV *A, const SCEV *B) { 9722 // Quick check to see if they are the same SCEV. 9723 if (A == B) return true; 9724 9725 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) { 9726 // Not all instructions that are "identical" compute the same value. For 9727 // instance, two distinct alloca instructions allocating the same type are 9728 // identical and do not read memory, but they compute distinct values. 9729 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A)); 9730 }; 9731 9732 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 9733 // two different instructions with the same value. Check for this case. 9734 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 9735 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 9736 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 9737 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 9738 if (ComputesEqualValues(AI, BI)) 9739 return true; 9740 9741 // Otherwise assume they may have a different value. 9742 return false; 9743 } 9744 9745 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 9746 const SCEV *&LHS, const SCEV *&RHS, 9747 unsigned Depth) { 9748 bool Changed = false; 9749 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or 9750 // '0 != 0'. 9751 auto TrivialCase = [&](bool TriviallyTrue) { 9752 LHS = RHS = getConstant(ConstantInt::getFalse(getContext())); 9753 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 9754 return true; 9755 }; 9756 // If we hit the max recursion limit, bail out. 9757 if (Depth >= 3) 9758 return false; 9759 9760 // Canonicalize a constant to the right side. 9761 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 9762 // Check for both operands constant. 9763 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 9764 if (ConstantExpr::getICmp(Pred, 9765 LHSC->getValue(), 9766 RHSC->getValue())->isNullValue()) 9767 return TrivialCase(false); 9768 else 9769 return TrivialCase(true); 9770 } 9771 // Otherwise swap the operands to put the constant on the right. 9772 std::swap(LHS, RHS); 9773 Pred = ICmpInst::getSwappedPredicate(Pred); 9774 Changed = true; 9775 } 9776 9777 // If we're comparing an addrec with a value which is loop-invariant in the 9778 // addrec's loop, put the addrec on the left. Also perform a dominance check, 9779 // since both operands could be addrecs loop-invariant in each other's loop. 9780 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 9781 const Loop *L = AR->getLoop(); 9782 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { 9783 std::swap(LHS, RHS); 9784 Pred = ICmpInst::getSwappedPredicate(Pred); 9785 Changed = true; 9786 } 9787 } 9788 9789 // If there's a constant operand, canonicalize comparisons with boundary 9790 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
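// E.g. (illustrative) "x u>= 1" becomes "x u> 0", and "x s<= 7" becomes
// "x s< 8".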
9791 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 9792 const APInt &RA = RC->getAPInt(); 9793 9794 bool SimplifiedByConstantRange = false; 9795 9796 if (!ICmpInst::isEquality(Pred)) { 9797 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 9798 if (ExactCR.isFullSet()) 9799 return TrivialCase(true); 9800 else if (ExactCR.isEmptySet()) 9801 return TrivialCase(false); 9802 9803 APInt NewRHS; 9804 CmpInst::Predicate NewPred; 9805 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 9806 ICmpInst::isEquality(NewPred)) { 9807 // We were able to convert an inequality to an equality. 9808 Pred = NewPred; 9809 RHS = getConstant(NewRHS); 9810 Changed = SimplifiedByConstantRange = true; 9811 } 9812 } 9813 9814 if (!SimplifiedByConstantRange) { 9815 switch (Pred) { 9816 default: 9817 break; 9818 case ICmpInst::ICMP_EQ: 9819 case ICmpInst::ICMP_NE: 9820 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 9821 if (!RA) 9822 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9823 if (const SCEVMulExpr *ME = 9824 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9825 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9826 ME->getOperand(0)->isAllOnesValue()) { 9827 RHS = AE->getOperand(1); 9828 LHS = ME->getOperand(1); 9829 Changed = true; 9830 } 9831 break; 9832 9833 9834 // The "Should have been caught earlier!" messages refer to the fact 9835 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9836 // should have fired on the corresponding cases, and canonicalized the 9837 // check to trivial case. 9838 9839 case ICmpInst::ICMP_UGE: 9840 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9841 Pred = ICmpInst::ICMP_UGT; 9842 RHS = getConstant(RA - 1); 9843 Changed = true; 9844 break; 9845 case ICmpInst::ICMP_ULE: 9846 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9847 Pred = ICmpInst::ICMP_ULT; 9848 RHS = getConstant(RA + 1); 9849 Changed = true; 9850 break; 9851 case ICmpInst::ICMP_SGE: 9852 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9853 Pred = ICmpInst::ICMP_SGT; 9854 RHS = getConstant(RA - 1); 9855 Changed = true; 9856 break; 9857 case ICmpInst::ICMP_SLE: 9858 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9859 Pred = ICmpInst::ICMP_SLT; 9860 RHS = getConstant(RA + 1); 9861 Changed = true; 9862 break; 9863 } 9864 } 9865 } 9866 9867 // Check for obvious equality. 9868 if (HasSameValue(LHS, RHS)) { 9869 if (ICmpInst::isTrueWhenEqual(Pred)) 9870 return TrivialCase(true); 9871 if (ICmpInst::isFalseWhenEqual(Pred)) 9872 return TrivialCase(false); 9873 } 9874 9875 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9876 // adding or subtracting 1 from one of the operands. 
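// E.g. (illustrative) "a s<= b" becomes "a s< b + 1" when b + 1 provably
// cannot overflow, or "a - 1 s< b" when a - 1 provably cannot.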
9877 switch (Pred) { 9878 case ICmpInst::ICMP_SLE: 9879 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9880 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9881 SCEV::FlagNSW); 9882 Pred = ICmpInst::ICMP_SLT; 9883 Changed = true; 9884 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9885 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9886 SCEV::FlagNSW); 9887 Pred = ICmpInst::ICMP_SLT; 9888 Changed = true; 9889 } 9890 break; 9891 case ICmpInst::ICMP_SGE: 9892 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9893 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9894 SCEV::FlagNSW); 9895 Pred = ICmpInst::ICMP_SGT; 9896 Changed = true; 9897 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9898 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9899 SCEV::FlagNSW); 9900 Pred = ICmpInst::ICMP_SGT; 9901 Changed = true; 9902 } 9903 break; 9904 case ICmpInst::ICMP_ULE: 9905 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9906 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9907 SCEV::FlagNUW); 9908 Pred = ICmpInst::ICMP_ULT; 9909 Changed = true; 9910 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9911 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9912 Pred = ICmpInst::ICMP_ULT; 9913 Changed = true; 9914 } 9915 break; 9916 case ICmpInst::ICMP_UGE: 9917 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9918 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9919 Pred = ICmpInst::ICMP_UGT; 9920 Changed = true; 9921 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9922 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9923 SCEV::FlagNUW); 9924 Pred = ICmpInst::ICMP_UGT; 9925 Changed = true; 9926 } 9927 break; 9928 default: 9929 break; 9930 } 9931 9932 // TODO: More simplifications are possible here. 9933 9934 // Recursively simplify until we either hit a recursion limit or nothing 9935 // changes. 9936 if (Changed) 9937 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9938 9939 return Changed; 9940 } 9941 9942 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9943 return getSignedRangeMax(S).isNegative(); 9944 } 9945 9946 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9947 return getSignedRangeMin(S).isStrictlyPositive(); 9948 } 9949 9950 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9951 return !getSignedRangeMin(S).isNegative(); 9952 } 9953 9954 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9955 return !getSignedRangeMax(S).isStrictlyPositive(); 9956 } 9957 9958 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9959 return getUnsignedRangeMin(S) != 0; 9960 } 9961 9962 std::pair<const SCEV *, const SCEV *> 9963 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9964 // Compute SCEV on entry of loop L. 9965 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9966 if (Start == getCouldNotCompute()) 9967 return { Start, Start }; 9968 // Compute post increment SCEV for loop L. 9969 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9970 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9971 return { Start, PostInc }; 9972 } 9973 9974 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9975 const SCEV *LHS, const SCEV *RHS) { 9976 // First collect all loops. 
9977 SmallPtrSet<const Loop *, 8> LoopsUsed; 9978 getUsedLoops(LHS, LoopsUsed); 9979 getUsedLoops(RHS, LoopsUsed); 9980 9981 if (LoopsUsed.empty()) 9982 return false; 9983 9984 // The domination relationship must be a linear order on the collected loops. 9985 #ifndef NDEBUG 9986 for (auto *L1 : LoopsUsed) 9987 for (auto *L2 : LoopsUsed) 9988 assert((DT.dominates(L1->getHeader(), L2->getHeader()) || 9989 DT.dominates(L2->getHeader(), L1->getHeader())) && 9990 "Domination relationship is not a linear order"); 9991 #endif 9992 9993 const Loop *MDL = 9994 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(), 9995 [&](const Loop *L1, const Loop *L2) { 9996 return DT.properlyDominates(L1->getHeader(), L2->getHeader()); 9997 }); 9998 9999 // Get the init and post-increment value for LHS. 10000 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS); 10001 // If LHS contains an unknown non-invariant SCEV, then bail out. 10002 if (SplitLHS.first == getCouldNotCompute()) 10003 return false; 10004 assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC"); 10005 // Get the init and post-increment value for RHS. 10006 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS); 10007 // If RHS contains an unknown non-invariant SCEV, then bail out. 10008 if (SplitRHS.first == getCouldNotCompute()) 10009 return false; 10010 assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC"); 10011 // It is possible that the init SCEV contains an invariant load that does 10012 // not dominate MDL and is not available at MDL's loop entry, so we must 10013 // check for that here. 10014 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) || 10015 !isAvailableAtLoopEntry(SplitRHS.first, MDL)) 10016 return false; 10017 10018 // The backedge guard check seems to be faster than the entry check, so in 10019 // some cases it can speed up the whole estimation by short-circuiting. 10020 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second, 10021 SplitRHS.second) && 10022 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first); 10023 } 10024 10025 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 10026 const SCEV *LHS, const SCEV *RHS) { 10027 // Canonicalize the inputs first. 10028 (void)SimplifyICmpOperands(Pred, LHS, RHS); 10029 10030 if (isKnownViaInduction(Pred, LHS, RHS)) 10031 return true; 10032 10033 if (isKnownPredicateViaSplitting(Pred, LHS, RHS)) 10034 return true; 10035 10036 // Otherwise see what can be done with some simple reasoning. 10037 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS); 10038 } 10039 10040 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred, 10041 const SCEV *LHS, 10042 const SCEV *RHS) { 10043 if (isKnownPredicate(Pred, LHS, RHS)) 10044 return true; 10045 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS)) 10046 return false; 10047 return None; 10048 } 10049 10050 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred, 10051 const SCEV *LHS, const SCEV *RHS, 10052 const Instruction *CtxI) { 10053 // TODO: Analyze guards and assumes from Context's block.
10054 return isKnownPredicate(Pred, LHS, RHS) || 10055 isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS); 10056 } 10057 10058 Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, 10059 const SCEV *LHS, 10060 const SCEV *RHS, 10061 const Instruction *CtxI) { 10062 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS); 10063 if (KnownWithoutContext) 10064 return KnownWithoutContext; 10065 10066 if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS)) 10067 return true; 10068 else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), 10069 ICmpInst::getInversePredicate(Pred), 10070 LHS, RHS)) 10071 return false; 10072 return None; 10073 } 10074 10075 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred, 10076 const SCEVAddRecExpr *LHS, 10077 const SCEV *RHS) { 10078 const Loop *L = LHS->getLoop(); 10079 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) && 10080 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS); 10081 } 10082 10083 Optional<ScalarEvolution::MonotonicPredicateType> 10084 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS, 10085 ICmpInst::Predicate Pred) { 10086 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred); 10087 10088 #ifndef NDEBUG 10089 // Verify an invariant: inverting the predicate should turn a monotonically 10090 // increasing change to a monotonically decreasing one, and vice versa. 10091 if (Result) { 10092 auto ResultSwapped = 10093 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred)); 10094 10095 assert(ResultSwapped.hasValue() && "should be able to analyze both!"); 10096 assert(ResultSwapped.getValue() != Result.getValue() && 10097 "monotonicity should flip as we flip the predicate"); 10098 } 10099 #endif 10100 10101 return Result; 10102 } 10103 10104 Optional<ScalarEvolution::MonotonicPredicateType> 10105 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS, 10106 ICmpInst::Predicate Pred) { 10107 // A zero step value for LHS means the induction variable is essentially a 10108 // loop invariant value. We don't really depend on the predicate actually 10109 // flipping from false to true (for increasing predicates, and the other way 10110 // around for decreasing predicates), all we care about is that *if* the 10111 // predicate changes then it only changes from false to true. 10112 // 10113 // A zero step value in itself is not very useful, but there may be places 10114 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be 10115 // as general as possible. 10116 10117 // Only handle LE/LT/GE/GT predicates. 10118 if (!ICmpInst::isRelational(Pred)) 10119 return None; 10120 10121 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred); 10122 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) && 10123 "Should be greater or less!"); 10124 10125 // Check that AR does not wrap. 10126 if (ICmpInst::isUnsigned(Pred)) { 10127 if (!LHS->hasNoUnsignedWrap()) 10128 return None; 10129 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 10130 } else { 10131 assert(ICmpInst::isSigned(Pred) && 10132 "Relational predicate is either signed or unsigned!"); 10133 if (!LHS->hasNoSignedWrap()) 10134 return None; 10135 10136 const SCEV *Step = LHS->getStepRecurrence(*this); 10137 10138 if (isKnownNonNegative(Step)) 10139 return IsGreater ? 
MonotonicallyIncreasing : MonotonicallyDecreasing; 10140 10141 if (isKnownNonPositive(Step)) 10142 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 10143 10144 return None; 10145 } 10146 } 10147 10148 Optional<ScalarEvolution::LoopInvariantPredicate> 10149 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 10150 const SCEV *LHS, const SCEV *RHS, 10151 const Loop *L) { 10152 10153 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10154 if (!isLoopInvariant(RHS, L)) { 10155 if (!isLoopInvariant(LHS, L)) 10156 return None; 10157 10158 std::swap(LHS, RHS); 10159 Pred = ICmpInst::getSwappedPredicate(Pred); 10160 } 10161 10162 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10163 if (!ArLHS || ArLHS->getLoop() != L) 10164 return None; 10165 10166 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 10167 if (!MonotonicType) 10168 return None; 10169 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 10170 // true as the loop iterates, and the backedge is control dependent on 10171 // "ArLHS `Pred` RHS" == true then we can reason as follows: 10172 // 10173 // * if the predicate was false in the first iteration then the predicate 10174 // is never evaluated again, since the loop exits without taking the 10175 // backedge. 10176 // * if the predicate was true in the first iteration then it will 10177 // continue to be true for all future iterations since it is 10178 // monotonically increasing. 10179 // 10180 // For both the above possibilities, we can replace the loop varying 10181 // predicate with its value on the first iteration of the loop (which is 10182 // loop invariant). 10183 // 10184 // A similar reasoning applies for a monotonically decreasing predicate, by 10185 // replacing true with false and false with true in the above two bullets. 10186 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 10187 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 10188 10189 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 10190 return None; 10191 10192 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 10193 } 10194 10195 Optional<ScalarEvolution::LoopInvariantPredicate> 10196 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 10197 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 10198 const Instruction *CtxI, const SCEV *MaxIter) { 10199 // Try to prove the following set of facts: 10200 // - The predicate is monotonic in the iteration space. 10201 // - If the check does not fail on the 1st iteration: 10202 // - No overflow will happen during first MaxIter iterations; 10203 // - It will not fail on the MaxIter'th iteration. 10204 // If the check does fail on the 1st iteration, we leave the loop and no 10205 // other checks matter. 10206 10207 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10208 if (!isLoopInvariant(RHS, L)) { 10209 if (!isLoopInvariant(LHS, L)) 10210 return None; 10211 10212 std::swap(LHS, RHS); 10213 Pred = ICmpInst::getSwappedPredicate(Pred); 10214 } 10215 10216 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 10217 if (!AR || AR->getLoop() != L) 10218 return None; 10219 10220 // The predicate must be relational (i.e. <, <=, >=, >). 10221 if (!ICmpInst::isRelational(Pred)) 10222 return None; 10223 10224 // TODO: Support steps other than +/- 1. 
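// E.g. (illustrative) {0,+,1} and {%n,+,-1} are accepted; an addrec such
// as {0,+,4} is rejected by the check below.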
10225 const SCEV *Step = AR->getStepRecurrence(*this); 10226 auto *One = getOne(Step->getType()); 10227 auto *MinusOne = getNegativeSCEV(One); 10228 if (Step != One && Step != MinusOne) 10229 return None; 10230 10231 // A type mismatch here means that MaxIter is potentially larger than the 10232 // maximum unsigned value of the start type, which means we cannot prove 10233 // no-wrap for the indvar. 10234 if (AR->getType() != MaxIter->getType()) 10235 return None; 10236 10237 // Value of the IV on the suggested last iteration. 10238 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this); 10239 // Does it still meet the requirement? 10240 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS)) 10241 return None; 10242 // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it 10243 // does not exceed the max unsigned value of this type), this effectively 10244 // proves that there is no wrap during the iteration. To prove that there is 10245 // no signed/unsigned wrap, we need to check that 10246 // Start <= Last for step = 1 or Start >= Last for step = -1. 10247 ICmpInst::Predicate NoOverflowPred = 10248 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 10249 if (Step == MinusOne) 10250 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred); 10251 const SCEV *Start = AR->getStart(); 10252 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI)) 10253 return None; 10254 10255 // Everything is fine. 10256 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS); 10257 } 10258 10259 bool ScalarEvolution::isKnownPredicateViaConstantRanges( 10260 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) { 10261 if (HasSameValue(LHS, RHS)) 10262 return ICmpInst::isTrueWhenEqual(Pred); 10263 10264 // This code is split out from isKnownPredicate because it is called from 10265 // within isLoopEntryGuardedByCond. 10266 10267 auto CheckRanges = [&](const ConstantRange &RangeLHS, 10268 const ConstantRange &RangeRHS) { 10269 return RangeLHS.icmp(Pred, RangeRHS); 10270 }; 10271 10272 // The check at the top of the function catches the case where the values are 10273 // known to be equal. 10274 if (Pred == CmpInst::ICMP_EQ) 10275 return false; 10276 10277 if (Pred == CmpInst::ICMP_NE) { 10278 if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) || 10279 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS))) 10280 return true; 10281 auto *Diff = getMinusSCEV(LHS, RHS); 10282 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff); 10283 } 10284 10285 if (CmpInst::isSigned(Pred)) 10286 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)); 10287 10288 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)); 10289 } 10290 10291 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, 10292 const SCEV *LHS, 10293 const SCEV *RHS) { 10294 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where 10295 // C1 and C2 are constant integers. If either X or Y is not an add 10296 // expression, consider it as X + 0 or Y + 0 respectively. C1 and C2 are 10297 // returned via OutC1 and OutC2.
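// E.g. (illustrative) X = (%a + 2)<nsw> and Y = (%a + 5)<nsw> match with
// OutC1 = 2 and OutC2 = 5, which below proves "X s< Y" since 2 s< 5.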
10298 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 10299 APInt &OutC1, APInt &OutC2, 10300 SCEV::NoWrapFlags ExpectedFlags) { 10301 const SCEV *XNonConstOp, *XConstOp; 10302 const SCEV *YNonConstOp, *YConstOp; 10303 SCEV::NoWrapFlags XFlagsPresent; 10304 SCEV::NoWrapFlags YFlagsPresent; 10305 10306 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 10307 XConstOp = getZero(X->getType()); 10308 XNonConstOp = X; 10309 XFlagsPresent = ExpectedFlags; 10310 } 10311 if (!isa<SCEVConstant>(XConstOp) || 10312 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 10313 return false; 10314 10315 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 10316 YConstOp = getZero(Y->getType()); 10317 YNonConstOp = Y; 10318 YFlagsPresent = ExpectedFlags; 10319 } 10320 10321 if (!isa<SCEVConstant>(YConstOp) || 10322 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 10323 return false; 10324 10325 if (YNonConstOp != XNonConstOp) 10326 return false; 10327 10328 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 10329 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 10330 10331 return true; 10332 }; 10333 10334 APInt C1; 10335 APInt C2; 10336 10337 switch (Pred) { 10338 default: 10339 break; 10340 10341 case ICmpInst::ICMP_SGE: 10342 std::swap(LHS, RHS); 10343 LLVM_FALLTHROUGH; 10344 case ICmpInst::ICMP_SLE: 10345 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 10346 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 10347 return true; 10348 10349 break; 10350 10351 case ICmpInst::ICMP_SGT: 10352 std::swap(LHS, RHS); 10353 LLVM_FALLTHROUGH; 10354 case ICmpInst::ICMP_SLT: 10355 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 10356 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 10357 return true; 10358 10359 break; 10360 10361 case ICmpInst::ICMP_UGE: 10362 std::swap(LHS, RHS); 10363 LLVM_FALLTHROUGH; 10364 case ICmpInst::ICMP_ULE: 10365 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 10366 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 10367 return true; 10368 10369 break; 10370 10371 case ICmpInst::ICMP_UGT: 10372 std::swap(LHS, RHS); 10373 LLVM_FALLTHROUGH; 10374 case ICmpInst::ICMP_ULT: 10375 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 10376 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 10377 return true; 10378 break; 10379 } 10380 10381 return false; 10382 } 10383 10384 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 10385 const SCEV *LHS, 10386 const SCEV *RHS) { 10387 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 10388 return false; 10389 10390 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 10391 // the stack can result in exponential time complexity. 10392 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 10393 10394 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 10395 // 10396 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 10397 // isKnownPredicate. isKnownPredicate is more powerful, but also more 10398 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 10399 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 10400 // use isKnownPredicate later if needed. 
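// E.g. (illustrative) for i8 operands: once RHS is known nonnegative,
// proving "0 s<= LHS s< RHS" places both sides in [0, 127], where signed
// and unsigned orderings agree, so "LHS u< RHS" follows.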
10401   return isKnownNonNegative(RHS) &&
10402          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10403          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10404 }
10405
10406 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10407                                         ICmpInst::Predicate Pred,
10408                                         const SCEV *LHS, const SCEV *RHS) {
10409   // No need to even try if we know the module has no guards.
10410   if (!HasGuards)
10411     return false;
10412
10413   return any_of(*BB, [&](const Instruction &I) {
10414     using namespace llvm::PatternMatch;
10415
10416     Value *Condition;
10417     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10418                          m_Value(Condition))) &&
10419            isImpliedCond(Pred, LHS, RHS, Condition, false);
10420   });
10421 }
10422
10423 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10424 /// protected by a conditional between LHS and RHS. This is used to
10425 /// eliminate casts.
10426 bool
10427 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10428                                              ICmpInst::Predicate Pred,
10429                                              const SCEV *LHS, const SCEV *RHS) {
10430   // Interpret a null as meaning no loop, where there is obviously no guard
10431   // (interprocedural conditions notwithstanding).
10432   if (!L) return true;
10433
10434   if (VerifyIR)
10435     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10436            "This cannot be done on broken IR!");
10437
10438
10439   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10440     return true;
10441
10442   BasicBlock *Latch = L->getLoopLatch();
10443   if (!Latch)
10444     return false;
10445
10446   BranchInst *LoopContinuePredicate =
10447       dyn_cast<BranchInst>(Latch->getTerminator());
10448   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10449       isImpliedCond(Pred, LHS, RHS,
10450                     LoopContinuePredicate->getCondition(),
10451                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10452     return true;
10453
10454   // We don't want more than one activation of the following loops on the stack
10455   // -- that can lead to O(n!) time complexity.
10456   if (WalkingBEDominatingConds)
10457     return false;
10458
10459   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10460
10461   // See if we can exploit a trip count to prove the predicate.
10462   const auto &BETakenInfo = getBackedgeTakenInfo(L);
10463   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10464   if (LatchBECount != getCouldNotCompute()) {
10465     // We know that Latch branches back to the loop header exactly
10466     // LatchBECount times. This means the backedge condition at Latch is
10467     // equivalent to "{0,+,1} u< LatchBECount".
10468     Type *Ty = LatchBECount->getType();
10469     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10470     const SCEV *LoopCounter =
10471         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10472     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10473                       LatchBECount))
10474       return true;
10475   }
10476
10477   // Check conditions due to any @llvm.assume intrinsics.
10478   for (auto &AssumeVH : AC.assumptions()) {
10479     if (!AssumeVH)
10480       continue;
10481     auto *CI = cast<CallInst>(AssumeVH);
10482     if (!DT.dominates(CI, Latch->getTerminator()))
10483       continue;
10484
10485     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10486       return true;
10487   }
10488
10489   // If the loop is not reachable from the entry block, we risk running into an
10490   // infinite loop as we walk up into the dom tree.
These loops do not matter 10491 // anyway, so we just return a conservative answer when we see them. 10492 if (!DT.isReachableFromEntry(L->getHeader())) 10493 return false; 10494 10495 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10496 return true; 10497 10498 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10499 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10500 assert(DTN && "should reach the loop header before reaching the root!"); 10501 10502 BasicBlock *BB = DTN->getBlock(); 10503 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10504 return true; 10505 10506 BasicBlock *PBB = BB->getSinglePredecessor(); 10507 if (!PBB) 10508 continue; 10509 10510 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10511 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10512 continue; 10513 10514 Value *Condition = ContinuePredicate->getCondition(); 10515 10516 // If we have an edge `E` within the loop body that dominates the only 10517 // latch, the condition guarding `E` also guards the backedge. This 10518 // reasoning works only for loops with a single latch. 10519 10520 BasicBlockEdge DominatingEdge(PBB, BB); 10521 if (DominatingEdge.isSingleEdge()) { 10522 // We're constructively (and conservatively) enumerating edges within the 10523 // loop body that dominate the latch. The dominator tree better agree 10524 // with us on this: 10525 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10526 10527 if (isImpliedCond(Pred, LHS, RHS, Condition, 10528 BB != ContinuePredicate->getSuccessor(0))) 10529 return true; 10530 } 10531 } 10532 10533 return false; 10534 } 10535 10536 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10537 ICmpInst::Predicate Pred, 10538 const SCEV *LHS, 10539 const SCEV *RHS) { 10540 if (VerifyIR) 10541 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10542 "This cannot be done on broken IR!"); 10543 10544 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10545 // the facts (a >= b && a != b) separately. A typical situation is when the 10546 // non-strict comparison is known from ranges and non-equality is known from 10547 // dominating predicates. If we are proving strict comparison, we always try 10548 // to prove non-equality and non-strict comparison separately. 10549 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10550 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10551 bool ProvedNonStrictComparison = false; 10552 bool ProvedNonEquality = false; 10553 10554 auto SplitAndProve = 10555 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10556 if (!ProvedNonStrictComparison) 10557 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10558 if (!ProvedNonEquality) 10559 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10560 if (ProvedNonStrictComparison && ProvedNonEquality) 10561 return true; 10562 return false; 10563 }; 10564 10565 if (ProvingStrictComparison) { 10566 auto ProofFn = [&](ICmpInst::Predicate P) { 10567 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10568 }; 10569 if (SplitAndProve(ProofFn)) 10570 return true; 10571 } 10572 10573 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
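  // A guard here is a call of the form (illustrative IR):
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  // and its %cond is used below exactly like the condition of a dominating
  // branch.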
10574   auto ProveViaGuard = [&](const BasicBlock *Block) {
10575     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10576       return true;
10577     if (ProvingStrictComparison) {
10578       auto ProofFn = [&](ICmpInst::Predicate P) {
10579         return isImpliedViaGuard(Block, P, LHS, RHS);
10580       };
10581       if (SplitAndProve(ProofFn))
10582         return true;
10583     }
10584     return false;
10585   };
10586
10587   // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10588   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10589     const Instruction *CtxI = &BB->front();
10590     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
10591       return true;
10592     if (ProvingStrictComparison) {
10593       auto ProofFn = [&](ICmpInst::Predicate P) {
10594         return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
10595       };
10596       if (SplitAndProve(ProofFn))
10597         return true;
10598     }
10599     return false;
10600   };
10601
10602   // Starting at the block's predecessor, climb up the predecessor chain for
10603   // as long as each predecessor we find has a unique successor leading to the
10604   // original block.
10605   const Loop *ContainingLoop = LI.getLoopFor(BB);
10606   const BasicBlock *PredBB;
10607   if (ContainingLoop && ContainingLoop->getHeader() == BB)
10608     PredBB = ContainingLoop->getLoopPredecessor();
10609   else
10610     PredBB = BB->getSinglePredecessor();
10611   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10612        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10613     if (ProveViaGuard(Pair.first))
10614       return true;
10615
10616     const BranchInst *LoopEntryPredicate =
10617         dyn_cast<BranchInst>(Pair.first->getTerminator());
10618     if (!LoopEntryPredicate ||
10619         LoopEntryPredicate->isUnconditional())
10620       continue;
10621
10622     if (ProveViaCond(LoopEntryPredicate->getCondition(),
10623                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
10624       return true;
10625   }
10626
10627   // Check conditions due to any @llvm.assume intrinsics.
10628   for (auto &AssumeVH : AC.assumptions()) {
10629     if (!AssumeVH)
10630       continue;
10631     auto *CI = cast<CallInst>(AssumeVH);
10632     if (!DT.dominates(CI, BB))
10633       continue;
10634
10635     if (ProveViaCond(CI->getArgOperand(0), false))
10636       return true;
10637   }
10638
10639   return false;
10640 }
10641
10642 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10643                                                ICmpInst::Predicate Pred,
10644                                                const SCEV *LHS,
10645                                                const SCEV *RHS) {
10646   // Interpret a null as meaning no loop, where there is obviously no guard
10647   // (interprocedural conditions notwithstanding).
10648   if (!L)
10649     return false;
10650
10651   // Both LHS and RHS must be available at loop entry.
10652   assert(isAvailableAtLoopEntry(LHS, L) &&
10653          "LHS is not available at Loop Entry");
10654   assert(isAvailableAtLoopEntry(RHS, L) &&
10655          "RHS is not available at Loop Entry");
10656
10657   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10658     return true;
10659
10660   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10661 }
10662
10663 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10664                                     const SCEV *RHS,
10665                                     const Value *FoundCondValue, bool Inverse,
10666                                     const Instruction *CtxI) {
10667   // A false condition implies anything. Do not bother analyzing it further.
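  // E.g. (illustrative): with Inverse == false, a literal `false` found
  // condition can never hold, so it vacuously implies LHS `Pred` RHS and the
  // comparison below returns true immediately.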
10668   if (FoundCondValue ==
10669       ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10670     return true;
10671
10672   if (!PendingLoopPredicates.insert(FoundCondValue).second)
10673     return false;
10674
10675   auto ClearOnExit =
10676       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10677
10678   // Recursively handle And and Or conditions.
10679   const Value *Op0, *Op1;
10680   if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10681     if (!Inverse)
10682       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10683              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10684   } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10685     if (Inverse)
10686       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
10687              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
10688   }
10689
10690   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10691   if (!ICI) return false;
10692
10693   // Now that we have found a conditional branch that dominates the loop or
10694   // controls the loop latch, check if it is the comparison we are looking for.
10695   ICmpInst::Predicate FoundPred;
10696   if (Inverse)
10697     FoundPred = ICI->getInversePredicate();
10698   else
10699     FoundPred = ICI->getPredicate();
10700
10701   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10702   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10703
10704   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
10705 }
10706
10707 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10708                                     const SCEV *RHS,
10709                                     ICmpInst::Predicate FoundPred,
10710                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
10711                                     const Instruction *CtxI) {
10712   // Balance the types.
10713   if (getTypeSizeInBits(LHS->getType()) <
10714       getTypeSizeInBits(FoundLHS->getType())) {
10715     // For unsigned and equality predicates, try to prove that both found
10716     // operands fit into a narrow unsigned range. If so, try to prove facts in
10717     // narrow types.
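    // Illustrative sketch (assumed widths): with i32 LHS/RHS and i64
    // FoundLHS/FoundRHS, MaxValue below is zext i32 0xFFFFFFFF to i64; if both
    // found operands are provably u<= MaxValue, the found fact is truncated to
    // i32 and the implication is retried on operands of equal width.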
10718 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) { 10719 auto *NarrowType = LHS->getType(); 10720 auto *WideType = FoundLHS->getType(); 10721 auto BitWidth = getTypeSizeInBits(NarrowType); 10722 const SCEV *MaxValue = getZeroExtendExpr( 10723 getConstant(APInt::getMaxValue(BitWidth)), WideType); 10724 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, 10725 MaxValue) && 10726 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, 10727 MaxValue)) { 10728 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 10729 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 10730 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 10731 TruncFoundRHS, CtxI)) 10732 return true; 10733 } 10734 } 10735 10736 if (LHS->getType()->isPointerTy()) 10737 return false; 10738 if (CmpInst::isSigned(Pred)) { 10739 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 10740 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 10741 } else { 10742 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 10743 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 10744 } 10745 } else if (getTypeSizeInBits(LHS->getType()) > 10746 getTypeSizeInBits(FoundLHS->getType())) { 10747 if (FoundLHS->getType()->isPointerTy()) 10748 return false; 10749 if (CmpInst::isSigned(FoundPred)) { 10750 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 10751 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 10752 } else { 10753 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 10754 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 10755 } 10756 } 10757 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 10758 FoundRHS, CtxI); 10759 } 10760 10761 bool ScalarEvolution::isImpliedCondBalancedTypes( 10762 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 10763 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 10764 const Instruction *CtxI) { 10765 assert(getTypeSizeInBits(LHS->getType()) == 10766 getTypeSizeInBits(FoundLHS->getType()) && 10767 "Types should be balanced!"); 10768 // Canonicalize the query to match the way instcombine will have 10769 // canonicalized the comparison. 10770 if (SimplifyICmpOperands(Pred, LHS, RHS)) 10771 if (LHS == RHS) 10772 return CmpInst::isTrueWhenEqual(Pred); 10773 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 10774 if (FoundLHS == FoundRHS) 10775 return CmpInst::isFalseWhenEqual(FoundPred); 10776 10777 // Check to see if we can make the LHS or RHS match. 10778 if (LHS == FoundRHS || RHS == FoundLHS) { 10779 if (isa<SCEVConstant>(RHS)) { 10780 std::swap(FoundLHS, FoundRHS); 10781 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 10782 } else { 10783 std::swap(LHS, RHS); 10784 Pred = ICmpInst::getSwappedPredicate(Pred); 10785 } 10786 } 10787 10788 // Check whether the found predicate is the same as the desired predicate. 10789 if (FoundPred == Pred) 10790 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 10791 10792 // Check whether swapping the found predicate makes it the same as the 10793 // desired predicate. 10794 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 10795 // We can write the implication 10796 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 10797 // using one of the following ways: 10798 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 10799 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 10800 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 10801 // 4. 
~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS
10802     // Forms 1. and 2. require swapping the operands of one condition. Don't
10803     // do this if it would break canonical constant/addrec ordering.
10804     if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
10805       return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
10806                                    CtxI);
10807     if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
10808       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);
10809
10810     // There's no clear preference between forms 3. and 4., so try both. Avoid
10811     // forming getNotSCEV of pointer values as the resulting subtract is
10812     // not legal.
10813     if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() &&
10814         isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
10815                               FoundLHS, FoundRHS, CtxI))
10816       return true;
10817
10818     if (!FoundLHS->getType()->isPointerTy() &&
10819         !FoundRHS->getType()->isPointerTy() &&
10820         isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
10821                               getNotSCEV(FoundRHS), CtxI))
10822       return true;
10823
10824     return false;
10825   }
10826
10827   auto IsSignFlippedPredicate = [](CmpInst::Predicate P1,
10828                                    CmpInst::Predicate P2) {
10829     assert(P1 != P2 && "Handled earlier!");
10830     return CmpInst::isRelational(P2) &&
10831            P1 == CmpInst::getFlippedSignednessPredicate(P2);
10832   };
10833   if (IsSignFlippedPredicate(Pred, FoundPred)) {
10834     // Unsigned comparison is the same as signed comparison when both operands
10835     // are non-negative or both are negative.
10836     if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) ||
10837         (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS)))
10838       return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
10839     // Create local copies that we can freely swap and canonicalize our
10840     // conditions to "le/lt".
10841     ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred;
10842     const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS,
10843                *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
10844     if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) {
10845       CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred);
10846       CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred);
10847       std::swap(CanonicalLHS, CanonicalRHS);
10848       std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
10849     }
10850     assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) &&
10851            "Must be!");
10852     assert((ICmpInst::isLT(CanonicalFoundPred) ||
10853             ICmpInst::isLE(CanonicalFoundPred)) &&
10854            "Must be!");
10855     if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS))
10856       // Use implication:
10857       // x <u y && y >=s 0 --> x <s y.
10858       // If we can prove the left part, the right part is also proven.
10859       return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
10860                                    CanonicalRHS, CanonicalFoundLHS,
10861                                    CanonicalFoundRHS);
10862     if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS))
10863       // Use implication:
10864       // x <s y && y <s 0 --> x <u y.
10865       // If we can prove the left part, the right part is also proven.
10866       return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
10867                                    CanonicalRHS, CanonicalFoundLHS,
10868                                    CanonicalFoundRHS);
10869   }
10870
10871   // Check if we can make progress by sharpening ranges.
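  // Illustrative example (assumed values): from a guarding V != 0 with C = 0,
  // if getUnsignedRangeMin(V) == 0 then V u>= 1 (the SharperMin below), and
  // that strictly stronger fact may be enough to prove LHS `Pred` RHS.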
10872 if (FoundPred == ICmpInst::ICMP_NE && 10873 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 10874 10875 const SCEVConstant *C = nullptr; 10876 const SCEV *V = nullptr; 10877 10878 if (isa<SCEVConstant>(FoundLHS)) { 10879 C = cast<SCEVConstant>(FoundLHS); 10880 V = FoundRHS; 10881 } else { 10882 C = cast<SCEVConstant>(FoundRHS); 10883 V = FoundLHS; 10884 } 10885 10886 // The guarding predicate tells us that C != V. If the known range 10887 // of V is [C, t), we can sharpen the range to [C + 1, t). The 10888 // range we consider has to correspond to same signedness as the 10889 // predicate we're interested in folding. 10890 10891 APInt Min = ICmpInst::isSigned(Pred) ? 10892 getSignedRangeMin(V) : getUnsignedRangeMin(V); 10893 10894 if (Min == C->getAPInt()) { 10895 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 10896 // This is true even if (Min + 1) wraps around -- in case of 10897 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 10898 10899 APInt SharperMin = Min + 1; 10900 10901 switch (Pred) { 10902 case ICmpInst::ICMP_SGE: 10903 case ICmpInst::ICMP_UGE: 10904 // We know V `Pred` SharperMin. If this implies LHS `Pred` 10905 // RHS, we're done. 10906 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 10907 CtxI)) 10908 return true; 10909 LLVM_FALLTHROUGH; 10910 10911 case ICmpInst::ICMP_SGT: 10912 case ICmpInst::ICMP_UGT: 10913 // We know from the range information that (V `Pred` Min || 10914 // V == Min). We know from the guarding condition that !(V 10915 // == Min). This gives us 10916 // 10917 // V `Pred` Min || V == Min && !(V == Min) 10918 // => V `Pred` Min 10919 // 10920 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 10921 10922 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) 10923 return true; 10924 break; 10925 10926 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 10927 case ICmpInst::ICMP_SLE: 10928 case ICmpInst::ICMP_ULE: 10929 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10930 LHS, V, getConstant(SharperMin), CtxI)) 10931 return true; 10932 LLVM_FALLTHROUGH; 10933 10934 case ICmpInst::ICMP_SLT: 10935 case ICmpInst::ICMP_ULT: 10936 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 10937 LHS, V, getConstant(Min), CtxI)) 10938 return true; 10939 break; 10940 10941 default: 10942 // No change 10943 break; 10944 } 10945 } 10946 } 10947 10948 // Check whether the actual condition is beyond sufficient. 10949 if (FoundPred == ICmpInst::ICMP_EQ) 10950 if (ICmpInst::isTrueWhenEqual(Pred)) 10951 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 10952 return true; 10953 if (Pred == ICmpInst::ICMP_NE) 10954 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 10955 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 10956 return true; 10957 10958 // Otherwise assume the worst. 
10959 return false; 10960 } 10961 10962 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 10963 const SCEV *&L, const SCEV *&R, 10964 SCEV::NoWrapFlags &Flags) { 10965 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 10966 if (!AE || AE->getNumOperands() != 2) 10967 return false; 10968 10969 L = AE->getOperand(0); 10970 R = AE->getOperand(1); 10971 Flags = AE->getNoWrapFlags(); 10972 return true; 10973 } 10974 10975 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 10976 const SCEV *Less) { 10977 // We avoid subtracting expressions here because this function is usually 10978 // fairly deep in the call stack (i.e. is called many times). 10979 10980 // X - X = 0. 10981 if (More == Less) 10982 return APInt(getTypeSizeInBits(More->getType()), 0); 10983 10984 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 10985 const auto *LAR = cast<SCEVAddRecExpr>(Less); 10986 const auto *MAR = cast<SCEVAddRecExpr>(More); 10987 10988 if (LAR->getLoop() != MAR->getLoop()) 10989 return None; 10990 10991 // We look at affine expressions only; not for correctness but to keep 10992 // getStepRecurrence cheap. 10993 if (!LAR->isAffine() || !MAR->isAffine()) 10994 return None; 10995 10996 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 10997 return None; 10998 10999 Less = LAR->getStart(); 11000 More = MAR->getStart(); 11001 11002 // fall through 11003 } 11004 11005 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 11006 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 11007 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 11008 return M - L; 11009 } 11010 11011 SCEV::NoWrapFlags Flags; 11012 const SCEV *LLess = nullptr, *RLess = nullptr; 11013 const SCEV *LMore = nullptr, *RMore = nullptr; 11014 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 11015 // Compare (X + C1) vs X. 11016 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 11017 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 11018 if (RLess == More) 11019 return -(C1->getAPInt()); 11020 11021 // Compare X vs (X + C2). 11022 if (splitBinaryAdd(More, LMore, RMore, Flags)) 11023 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 11024 if (RMore == Less) 11025 return C2->getAPInt(); 11026 11027 // Compare (X + C1) vs (X + C2). 11028 if (C1 && C2 && RLess == RMore) 11029 return C2->getAPInt() - C1->getAPInt(); 11030 11031 return None; 11032 } 11033 11034 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 11035 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11036 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { 11037 // Try to recognize the following pattern: 11038 // 11039 // FoundRHS = ... 11040 // ... 11041 // loop: 11042 // FoundLHS = {Start,+,W} 11043 // context_bb: // Basic block from the same loop 11044 // known(Pred, FoundLHS, FoundRHS) 11045 // 11046 // If some predicate is known in the context of a loop, it is also known on 11047 // each iteration of this loop, including the first iteration. Therefore, in 11048 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 11049 // prove the original pred using this fact. 11050 if (!CtxI) 11051 return false; 11052 const BasicBlock *ContextBB = CtxI->getParent(); 11053 // Make sure AR varies in the context block. 11054 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 11055 const Loop *L = AR->getLoop(); 11056 // Make sure that context belongs to the loop and executes on 1st iteration 11057 // (if it ever executes at all). 
11058 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11059 return false; 11060 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 11061 return false; 11062 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 11063 } 11064 11065 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 11066 const Loop *L = AR->getLoop(); 11067 // Make sure that context belongs to the loop and executes on 1st iteration 11068 // (if it ever executes at all). 11069 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11070 return false; 11071 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 11072 return false; 11073 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 11074 } 11075 11076 return false; 11077 } 11078 11079 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 11080 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11081 const SCEV *FoundLHS, const SCEV *FoundRHS) { 11082 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 11083 return false; 11084 11085 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 11086 if (!AddRecLHS) 11087 return false; 11088 11089 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 11090 if (!AddRecFoundLHS) 11091 return false; 11092 11093 // We'd like to let SCEV reason about control dependencies, so we constrain 11094 // both the inequalities to be about add recurrences on the same loop. This 11095 // way we can use isLoopEntryGuardedByCond later. 11096 11097 const Loop *L = AddRecFoundLHS->getLoop(); 11098 if (L != AddRecLHS->getLoop()) 11099 return false; 11100 11101 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 11102 // 11103 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 11104 // ... (2) 11105 // 11106 // Informal proof for (2), assuming (1) [*]: 11107 // 11108 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 11109 // 11110 // Then 11111 // 11112 // FoundLHS s< FoundRHS s< INT_MIN - C 11113 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 11114 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 11115 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 11116 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 11117 // <=> FoundLHS + C s< FoundRHS + C 11118 // 11119 // [*]: (1) can be proved by ruling out overflow. 11120 // 11121 // [**]: This can be proved by analyzing all the four possibilities: 11122 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 11123 // (A s>= 0, B s>= 0). 11124 // 11125 // Note: 11126 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 11127 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 11128 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 11129 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 11130 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 11131 // C)". 
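  // Illustrative check of the note above, in i8 arithmetic: FoundLHS + C =
  // -128 + -100 wraps to 28 while FoundRHS + C = -127 + -100 wraps to 29, and
  // 28 s< 29 still holds, exactly as implication (2) promises.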
11132
11133   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
11134   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
11135   if (!LDiff || !RDiff || *LDiff != *RDiff)
11136     return false;
11137
11138   if (LDiff->isMinValue())
11139     return true;
11140
11141   APInt FoundRHSLimit;
11142
11143   if (Pred == CmpInst::ICMP_ULT) {
11144     FoundRHSLimit = -(*RDiff);
11145   } else {
11146     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
11147     FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
11148   }
11149
11150   // Try to prove (1) or (2), as needed.
11151   return isAvailableAtLoopEntry(FoundRHS, L) &&
11152          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
11153                                   getConstant(FoundRHSLimit));
11154 }
11155
11156 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
11157                                         const SCEV *LHS, const SCEV *RHS,
11158                                         const SCEV *FoundLHS,
11159                                         const SCEV *FoundRHS, unsigned Depth) {
11160   const PHINode *LPhi = nullptr, *RPhi = nullptr;
11161
11162   auto ClearOnExit = make_scope_exit([&]() {
11163     if (LPhi) {
11164       bool Erased = PendingMerges.erase(LPhi);
11165       assert(Erased && "Failed to erase LPhi!");
11166       (void)Erased;
11167     }
11168     if (RPhi) {
11169       bool Erased = PendingMerges.erase(RPhi);
11170       assert(Erased && "Failed to erase RPhi!");
11171       (void)Erased;
11172     }
11173   });
11174
11175   // Find the respective Phis and check that they are not already pending.
11176   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
11177     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11178       if (!PendingMerges.insert(Phi).second)
11179         return false;
11180       LPhi = Phi;
11181     }
11182   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11183     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11184       // If we detect a loop of Phi nodes being processed by this method, for
11185       // example:
11186       //
11187       // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11188       // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11189       //
11190       // we don't want to deal with a case that complex, so we return the
11191       // conservative answer false.
11192       if (!PendingMerges.insert(Phi).second)
11193         return false;
11194       RPhi = Phi;
11195     }
11196
11197   // If neither LHS nor RHS is a Phi, there is nothing to do here.
11198   if (!LPhi && !RPhi)
11199     return false;
11200
11201   // If there is a SCEVUnknown Phi we are interested in, make it left.
11202   if (!LPhi) {
11203     std::swap(LHS, RHS);
11204     std::swap(FoundLHS, FoundRHS);
11205     std::swap(LPhi, RPhi);
11206     Pred = ICmpInst::getSwappedPredicate(Pred);
11207   }
11208
11209   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
11210   const BasicBlock *LBB = LPhi->getParent();
11211   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11212
11213   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11214     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11215            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11216            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11217   };
11218
11219   if (RPhi && RPhi->getParent() == LBB) {
11220     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11221     // If we compare two Phis from the same block, and for each entry block
11222     // the predicate is true for incoming values from this block, then the
11223     // predicate is also true for the Phis.
11224     for (const BasicBlock *IncBB : predecessors(LBB)) {
11225       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11226       const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
11227       if (!ProvedEasily(L, R))
11228         return false;
11229     }
11230   } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
11231     // Case two: RHS is also a Phi from the same basic block, and it is an
11232     // AddRec. It means that there is a loop which has both AddRec and
11233     // Unknown PHIs; for it, we can compare the AddRec's incoming values from
11234     // above the loop and from the latch with LPhi's respective incoming values.
11235     // TODO: Generalize to handle loops with many inputs in a header.
11236     if (LPhi->getNumIncomingValues() != 2) return false;
11237
11238     auto *RLoop = RAR->getLoop();
11239     auto *Predecessor = RLoop->getLoopPredecessor();
11240     assert(Predecessor && "Loop with AddRec with no predecessor?");
11241     const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11242     if (!ProvedEasily(L1, RAR->getStart()))
11243       return false;
11244     auto *Latch = RLoop->getLoopLatch();
11245     assert(Latch && "Loop with AddRec with no latch?");
11246     const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11247     if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11248       return false;
11249   } else {
11250     // In all other cases, go over the inputs of LHS and compare each of
11251     // them to RHS; the predicate is true for (LHS, RHS) if it is true for
11252     // all such pairs. At this point RHS is either a non-Phi, or it is a Phi
11253     // from some block different from LBB.
11254     for (const BasicBlock *IncBB : predecessors(LBB)) {
11255       // Check that RHS is available in this block.
11256       if (!dominates(RHS, IncBB))
11257         return false;
11258       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11259       // Make sure L does not refer to a value from a potentially previous
11260       // iteration of a loop.
11261       if (!properlyDominates(L, IncBB))
11262         return false;
11263       if (!ProvedEasily(L, RHS))
11264         return false;
11265     }
11266   }
11267   return true;
11268 }
11269
11270 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
11271                                             const SCEV *LHS, const SCEV *RHS,
11272                                             const SCEV *FoundLHS,
11273                                             const SCEV *FoundRHS,
11274                                             const Instruction *CtxI) {
11275   if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
11276     return true;
11277
11278   if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
11279     return true;
11280
11281   if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
11282                                           CtxI))
11283     return true;
11284
11285   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
11286                                      FoundLHS, FoundRHS);
11287 }
11288
11289 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
11290 template <typename MinMaxExprType>
11291 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
11292                                  const SCEV *Candidate) {
11293   const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
11294   if (!MinMaxExpr)
11295     return false;
11296
11297   return is_contained(MinMaxExpr->operands(), Candidate);
11298 }
11299
11300 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
11301                                            ICmpInst::Predicate Pred,
11302                                            const SCEV *LHS, const SCEV *RHS) {
11303   // If both sides are affine addrecs for the same loop, with equal
11304   // steps, and we know the recurrences don't wrap, then we only
11305   // need to check the predicate on the starting values.
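  // Illustrative instance (assumed addrecs): {4,+,3}<nuw> u< {10,+,3}<nuw>
  // over the same loop reduces to 4 u< 10 below, since equal steps keep the
  // gap between the two sequences constant and the nuw flags rule out one
  // sequence wrapping past the other.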
11306 11307 if (!ICmpInst::isRelational(Pred)) 11308 return false; 11309 11310 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 11311 if (!LAR) 11312 return false; 11313 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 11314 if (!RAR) 11315 return false; 11316 if (LAR->getLoop() != RAR->getLoop()) 11317 return false; 11318 if (!LAR->isAffine() || !RAR->isAffine()) 11319 return false; 11320 11321 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 11322 return false; 11323 11324 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 11325 SCEV::FlagNSW : SCEV::FlagNUW; 11326 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW)) 11327 return false; 11328 11329 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart()); 11330 } 11331 11332 /// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max 11333 /// expression? 11334 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, 11335 ICmpInst::Predicate Pred, 11336 const SCEV *LHS, const SCEV *RHS) { 11337 switch (Pred) { 11338 default: 11339 return false; 11340 11341 case ICmpInst::ICMP_SGE: 11342 std::swap(LHS, RHS); 11343 LLVM_FALLTHROUGH; 11344 case ICmpInst::ICMP_SLE: 11345 return 11346 // min(A, ...) <= A 11347 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) || 11348 // A <= max(A, ...) 11349 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS); 11350 11351 case ICmpInst::ICMP_UGE: 11352 std::swap(LHS, RHS); 11353 LLVM_FALLTHROUGH; 11354 case ICmpInst::ICMP_ULE: 11355 return 11356 // min(A, ...) <= A 11357 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) || 11358 // A <= max(A, ...) 11359 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS); 11360 } 11361 11362 llvm_unreachable("covered switch fell through?!"); 11363 } 11364 11365 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred, 11366 const SCEV *LHS, const SCEV *RHS, 11367 const SCEV *FoundLHS, 11368 const SCEV *FoundRHS, 11369 unsigned Depth) { 11370 assert(getTypeSizeInBits(LHS->getType()) == 11371 getTypeSizeInBits(RHS->getType()) && 11372 "LHS and RHS have different sizes?"); 11373 assert(getTypeSizeInBits(FoundLHS->getType()) == 11374 getTypeSizeInBits(FoundRHS->getType()) && 11375 "FoundLHS and FoundRHS have different sizes?"); 11376 // We want to avoid hurting the compile time with analysis of too big trees. 11377 if (Depth > MaxSCEVOperationsImplicationDepth) 11378 return false; 11379 11380 // We only want to work with GT comparison so far. 11381 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) { 11382 Pred = CmpInst::getSwappedPredicate(Pred); 11383 std::swap(LHS, RHS); 11384 std::swap(FoundLHS, FoundRHS); 11385 } 11386 11387 // For unsigned, try to reduce it to corresponding signed comparison. 11388 if (Pred == ICmpInst::ICMP_UGT) 11389 // We can replace unsigned predicate with its signed counterpart if all 11390 // involved values are non-negative. 11391 // TODO: We could have better support for unsigned. 11392 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) { 11393 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing 11394 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us 11395 // use this fact to prove that LHS and RHS are non-negative. 
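    // Illustrative sketch (assumed facts): from FoundLHS u> FoundRHS with both
    // known non-negative, the two queries below try to derive LHS s> -1 and
    // RHS s> -1; if both succeed, the unsigned query is safely restated as a
    // signed one.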
11396       const SCEV *MinusOne = getMinusOne(LHS->getType());
11397       if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11398                                 FoundRHS) &&
11399           isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11400                                 FoundRHS))
11401         Pred = ICmpInst::ICMP_SGT;
11402     }
11403
11404   if (Pred != ICmpInst::ICMP_SGT)
11405     return false;
11406
11407   auto GetOpFromSExt = [&](const SCEV *S) {
11408     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11409       return Ext->getOperand();
11410     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11411     // the constant in some cases.
11412     return S;
11413   };
11414
11415   // Acquire values from extensions.
11416   auto *OrigLHS = LHS;
11417   auto *OrigFoundLHS = FoundLHS;
11418   LHS = GetOpFromSExt(LHS);
11419   FoundLHS = GetOpFromSExt(FoundLHS);
11420
11421   // Whether the SGT predicate can be proved trivially or using the found context.
11422   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11423     return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11424            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11425                                   FoundRHS, Depth + 1);
11426   };
11427
11428   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11429     // We want to avoid creation of any new non-constant SCEV. Since we are
11430     // going to compare the operands to RHS, we should be certain that we don't
11431     // need any size extensions for this. So let's decline all cases when the
11432     // sizes of types of LHS and RHS do not match.
11433     // TODO: Maybe try to get RHS from sext to catch more cases?
11434     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11435       return false;
11436
11437     // Should not overflow.
11438     if (!LHSAddExpr->hasNoSignedWrap())
11439       return false;
11440
11441     auto *LL = LHSAddExpr->getOperand(0);
11442     auto *LR = LHSAddExpr->getOperand(1);
11443     auto *MinusOne = getMinusOne(RHS->getType());
11444
11445     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11446     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11447       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11448     };
11449     // Try to prove the following rule:
11450     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11451     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
11452     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11453       return true;
11454   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11455     Value *LL, *LR;
11456     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11457
11458     using namespace llvm::PatternMatch;
11459
11460     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11461       // Rules for division.
11462       // We are going to perform some comparisons with Denominator and its
11463       // derivative expressions. In the general case, creating a SCEV for it
11464       // may lead to a complex analysis of the entire graph, and in particular
11465       // it can request trip count recalculation for the same loop, which would
11466       // be cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
11467       // this, we only want to create SCEVs that are constants in this section.
11468       // So we bail if Denominator is not a constant.
11469       if (!isa<ConstantInt>(LR))
11470         return false;
11471
11472       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11473
11474       // We want to make sure that LHS = FoundLHS / Denominator.
If it is so,
11475       // then a SCEV for the numerator already exists and matches FoundLHS.
11476       auto *Numerator = getExistingSCEV(LL);
11477       if (!Numerator || Numerator->getType() != FoundLHS->getType())
11478         return false;
11479
11480       // Make sure that the numerator matches FoundLHS and the denominator is
11481       // positive.
11482       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11483         return false;
11484
11485       auto *DTy = Denominator->getType();
11486       auto *FRHSTy = FoundRHS->getType();
11487       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11488         // One of the types is a pointer and the other is not. We cannot extend
11489         // them properly to a wider type, so let us just reject this case.
11490         // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
11491         // to avoid this check.
11492         return false;
11493
11494       // Given that:
11495       // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
11496       auto *WTy = getWiderType(DTy, FRHSTy);
11497       auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
11498       auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
11499
11500       // Try to prove the following rule:
11501       // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
11502       // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
11503       // divide it by Denominator < 4, we will have at least 1.
11504       auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
11505       if (isKnownNonPositive(RHS) &&
11506           IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
11507         return true;
11508
11509       // Try to prove the following rule:
11510       // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
11511       // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
11512       // divide it by Denominator > 2, then:
11513       // 1. If FoundLHS is negative, then the result is 0.
11514       // 2. If FoundLHS is non-negative, then the result is non-negative.
11515       // Either way, the result is non-negative.
11516       auto *MinusOne = getMinusOne(WTy);
11517       auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
11518       if (isKnownNegative(RHS) &&
11519           IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
11520         return true;
11521     }
11522   }
11523
11524   // If our expression contained SCEVUnknown Phis, and we split it down and now
11525   // need to prove something for them, try to prove the predicate for every
11526   // possible incoming value of those Phis.
11527   if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
11528     return true;
11529
11530   return false;
11531 }
11532
11533 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
11534                                         const SCEV *LHS, const SCEV *RHS) {
11535   // zext x u<= sext x, sext x s<= zext x
11536   switch (Pred) {
11537   case ICmpInst::ICMP_SGE:
11538     std::swap(LHS, RHS);
11539     LLVM_FALLTHROUGH;
11540   case ICmpInst::ICMP_SLE: {
11541     // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
11542     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
11543     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
11544     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11545       return true;
11546     break;
11547   }
11548   case ICmpInst::ICMP_UGE:
11549     std::swap(LHS, RHS);
11550     LLVM_FALLTHROUGH;
11551   case ICmpInst::ICMP_ULE: {
11552     // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
11553     const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
11554     const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
11555     if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11556       return true;
11557     break;
11558   }
11559   default:
11560     break;
11561   }
11562   return false;
11563 }
11564
11565 bool
11566 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11567                                                  const SCEV *LHS, const SCEV *RHS) {
11568   return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11569          isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11570          IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11571          IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11572          isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11573 }
11574
11575 bool
11576 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
11577                                              const SCEV *LHS, const SCEV *RHS,
11578                                              const SCEV *FoundLHS,
11579                                              const SCEV *FoundRHS) {
11580   switch (Pred) {
11581   default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
11582   case ICmpInst::ICMP_EQ:
11583   case ICmpInst::ICMP_NE:
11584     if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
11585       return true;
11586     break;
11587   case ICmpInst::ICMP_SLT:
11588   case ICmpInst::ICMP_SLE:
11589     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
11590         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
11591       return true;
11592     break;
11593   case ICmpInst::ICMP_SGT:
11594   case ICmpInst::ICMP_SGE:
11595     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
11596         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
11597       return true;
11598     break;
11599   case ICmpInst::ICMP_ULT:
11600   case ICmpInst::ICMP_ULE:
11601     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
11602         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
11603       return true;
11604     break;
11605   case ICmpInst::ICMP_UGT:
11606   case ICmpInst::ICMP_UGE:
11607     if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
11608         isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
11609       return true;
11610     break;
11611   }
11612
11613   // Maybe it can be proved via operations?
11614   if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
11615     return true;
11616
11617   return false;
11618 }
11619
11620 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11621                                                      const SCEV *LHS,
11622                                                      const SCEV *RHS,
11623                                                      const SCEV *FoundLHS,
11624                                                      const SCEV *FoundRHS) {
11625   if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11626     // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11627     // reduce the compile time impact of this optimization.
11628     return false;
11629
11630   Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11631   if (!Addend)
11632     return false;
11633
11634   const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11635
11636   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11637   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
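  // Illustrative walk-through (assumed numbers): from FoundLHS u< 8 we get
  // FoundLHSRange = [0, 8) below; with Addend = 3, LHS lies in [3, 11), a
  // range that proves e.g. LHS u< 11 but not LHS u< 10.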
11638 ConstantRange FoundLHSRange = 11639 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); 11640 11641 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 11642 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 11643 11644 // We can also compute the range of values for `LHS` that satisfy the 11645 // consequent, "`LHS` `Pred` `RHS`": 11646 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 11647 // The antecedent implies the consequent if every value of `LHS` that 11648 // satisfies the antecedent also satisfies the consequent. 11649 return LHSRange.icmp(Pred, ConstRHS); 11650 } 11651 11652 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 11653 bool IsSigned) { 11654 assert(isKnownPositive(Stride) && "Positive stride expected!"); 11655 11656 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11657 const SCEV *One = getOne(Stride->getType()); 11658 11659 if (IsSigned) { 11660 APInt MaxRHS = getSignedRangeMax(RHS); 11661 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 11662 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11663 11664 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 11665 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 11666 } 11667 11668 APInt MaxRHS = getUnsignedRangeMax(RHS); 11669 APInt MaxValue = APInt::getMaxValue(BitWidth); 11670 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11671 11672 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 11673 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 11674 } 11675 11676 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 11677 bool IsSigned) { 11678 11679 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 11680 const SCEV *One = getOne(Stride->getType()); 11681 11682 if (IsSigned) { 11683 APInt MinRHS = getSignedRangeMin(RHS); 11684 APInt MinValue = APInt::getSignedMinValue(BitWidth); 11685 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 11686 11687 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 11688 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 11689 } 11690 11691 APInt MinRHS = getUnsignedRangeMin(RHS); 11692 APInt MinValue = APInt::getMinValue(BitWidth); 11693 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 11694 11695 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 11696 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 11697 } 11698 11699 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { 11700 // umin(N, 1) + floor((N - umin(N, 1)) / D) 11701 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin 11702 // expression fixes the case of N=0. 11703 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); 11704 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); 11705 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); 11706 } 11707 11708 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 11709 const SCEV *Stride, 11710 const SCEV *End, 11711 unsigned BitWidth, 11712 bool IsSigned) { 11713 // The logic in this function assumes we can represent a positive stride. 11714 // If we can't, the backedge-taken count must be zero. 
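  // For instance (illustrative), a 1-bit signed type represents only 0 and -1,
  // so no strictly positive stride exists and the count below must be zero.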
11715   if (IsSigned && BitWidth == 1)
11716     return getZero(Stride->getType());
11717
11718   // This code has only been closely audited for negative strides in the
11719   // unsigned comparison case; it may be correct for signed comparison, but
11720   // that needs to be established.
11721   assert((!IsSigned || !isKnownNonPositive(Stride)) &&
11722          "Stride is expected strictly positive for signed case!");
11723
11724   // Calculate the maximum backedge count based on the range of values
11725   // permitted by Start, End, and Stride.
11726   APInt MinStart =
11727       IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
11728
11729   APInt MinStride =
11730       IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
11731
11732   // We assume either the stride is positive, or the backedge-taken count
11733   // is zero. So force StrideForMaxBECount to be at least one.
11734   APInt One(BitWidth, 1);
11735   APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
11736                                        : APIntOps::umax(One, MinStride);
11737
11738   APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
11739                             : APInt::getMaxValue(BitWidth);
11740   APInt Limit = MaxValue - (StrideForMaxBECount - 1);
11741
11742   // Although End can be a MAX expression, we estimate MaxEnd considering only
11743   // the case End = RHS of the loop termination condition. This is safe because
11744   // in the other case (End - Start) is zero, leading to a zero maximum backedge
11745   // taken count.
11746   APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
11747                           : APIntOps::umin(getUnsignedRangeMax(End), Limit);
11748
11749   // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
11750   MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
11751                     : APIntOps::umax(MaxEnd, MinStart);
11752
11753   return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
11754                          getConstant(StrideForMaxBECount) /* Step */);
11755 }
11756
11757 ScalarEvolution::ExitLimit
11758 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
11759                                   const Loop *L, bool IsSigned,
11760                                   bool ControlsExit, bool AllowPredicates) {
11761   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11762
11763   const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11764   bool PredicatedIV = false;
11765
11766   auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) {
11767     // Can we prove this loop *must* be UB if overflow of IV occurs?
11768     // Reasoning goes as follows:
11769     // * Suppose the IV did self wrap.
11770     // * If Stride evenly divides the iteration space, then once wrap
11771     //   occurs, the loop must revisit the same values.
11772     // * We know that RHS is invariant, and that none of those values
11773     //   caused this exit to be taken previously. Thus, this exit is
11774     //   dynamically dead.
11775     // * If this is the sole exit, then a dead exit implies the loop
11776     //   must be infinite if there are no abnormal exits.
11777     // * If the loop were infinite, then it must either not be mustprogress
11778     //   or have side effects. Otherwise, it must be UB.
11779     // * It can't (by assumption) be UB, so we have contradicted our
11780     //   premise and can conclude the IV did not in fact self-wrap.
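    // Illustrative shape of such a loop (assumed, C-like):
    //   for (uint8_t i = 0; i < n; i += 4) { ... } // mustprogress, sole exit
    // If i self-wrapped, its 64 distinct values (step 4 evenly divides the
    // 256-value iteration space) would repeat forever without taking the exit,
    // yielding an infinite mustprogress loop, i.e. UB -- contradiction.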
11781 if (!isLoopInvariant(RHS, L)) 11782 return false; 11783 11784 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 11785 if (!StrideC || !StrideC->getAPInt().isPowerOf2()) 11786 return false; 11787 11788 if (!ControlsExit || !loopHasNoAbnormalExits(L)) 11789 return false; 11790 11791 return loopIsFiniteByAssumption(L); 11792 }; 11793 11794 if (!IV) { 11795 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) { 11796 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand()); 11797 if (AR && AR->getLoop() == L && AR->isAffine()) { 11798 auto canProveNUW = [&]() { 11799 if (!isLoopInvariant(RHS, L)) 11800 return false; 11801 11802 if (!isKnownNonZero(AR->getStepRecurrence(*this))) 11803 // We need the sequence defined by AR to strictly increase in the 11804 // unsigned integer domain for the logic below to hold. 11805 return false; 11806 11807 const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType()); 11808 const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType()); 11809 // If RHS <=u Limit, then there must exist a value V in the sequence 11810 // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and 11811 // V <=u UINT_MAX. Thus, we must exit the loop before unsigned 11812 // overflow occurs. This limit also implies that a signed comparison 11813 // (in the wide bitwidth) is equivalent to an unsigned comparison as 11814 // the high bits on both sides must be zero. 11815 APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this)); 11816 APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1); 11817 Limit = Limit.zext(OuterBitWidth); 11818 return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit); 11819 }; 11820 auto Flags = AR->getNoWrapFlags(); 11821 if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW()) 11822 Flags = setFlags(Flags, SCEV::FlagNUW); 11823 11824 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 11825 if (AR->hasNoUnsignedWrap()) { 11826 // Emulate what getZeroExtendExpr would have done during construction 11827 // if we'd been able to infer the fact just above at that time. 11828 const SCEV *Step = AR->getStepRecurrence(*this); 11829 Type *Ty = ZExt->getType(); 11830 auto *S = getAddRecExpr( 11831 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0), 11832 getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags()); 11833 IV = dyn_cast<SCEVAddRecExpr>(S); 11834 } 11835 } 11836 } 11837 } 11838 11839 11840 if (!IV && AllowPredicates) { 11841 // Try to make this an AddRec using runtime tests, in the first X 11842 // iterations of this loop, where X is the SCEV expression found by the 11843 // algorithm below. 11844 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 11845 PredicatedIV = true; 11846 } 11847 11848 // Avoid weird loops 11849 if (!IV || IV->getLoop() != L || !IV->isAffine()) 11850 return getCouldNotCompute(); 11851 11852 // A precondition of this method is that the condition being analyzed 11853 // reaches an exiting branch which dominates the latch. Given that, we can 11854 // assume that an increment which violates the nowrap specification and 11855 // produces poison must cause undefined behavior when the resulting poison 11856 // value is branched upon and thus we can conclude that the backedge is 11857 // taken no more often than would be required to produce that poison value. 
  // Note that a well defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Preconditions b) and c) combine to imply that if rhs is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
        !loopHasNoAbnormalExits(L))
      return getCouldNotCompute();

    // This bailout is protecting the logic in computeMaxBECountForLT which
    // has not yet been sufficiently audited or tested with negative strides.
    // We used to filter out all known-non-positive cases here; we're in the
    // process of being less restrictive bit by bit.
    if (IsSigned && isKnownNonPositive(Stride))
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't know
      // if it might eventually be greater than start and if so, on which
      // iteration. We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below. Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero. Given that,
      // we know the numerator in the divides below must be zero, so we can
      // pick an arbitrary non-zero value for the denominator (e.g. stride)
      // and produce the right result.
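      //
      // For illustration (hypothetical runtime values): if stride == 0 and
      // the exit is taken on iteration zero, the backedge-taken count is 0,
      // the numerator max(End, Start) - Start formed below is also 0, and
      // 0 /u d == 0 for any non-zero denominator d, so substituting
      // umax(1, stride) for the stride cannot change the result.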
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction. Suppose the stride were zero. If we can
        // prove that the backedge *is* taken on the first iteration, then
        // since we know this condition controls the sole exit, we must have
        // an infinite loop. We can't have a (well defined) infinite loop per
        // check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride). Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!Stride->isOne() && !NoWrap) {
    auto isUBOnWrap = [&]() {
      // From no-self-wrap, we need to then prove no-(un)signed-wrap. This
      // follows trivially from the fact that every (un)signed-wrapped, but
      // not self-wrapped, value must be strictly less than the last value
      // before (un)signed wrap; since we know that last value didn't take
      // the exit, neither will any smaller one.
      return canAssumeNoSelfWrap(IV);
    };

    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing us to optimize in the presence
    // of undefined behavior, as in C.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  // IV can be assumed not to overflow up to and including the exiting
  // iteration. We proved this in one of two ways:
  // 1) We can show overflow doesn't occur before the exiting iteration
  //    (1a: via canIVOverflowOnLT, or 1b: a step of one)
  // 2) We can show that if overflow occurs, the loop must execute UB
  //    before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    RHS = getLosslessPtrToIntExpr(RHS);
    if (isa<SCEVCouldNotCompute>(RHS))
      return RHS;
  }

  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we
  // can calculate the MaxBECount, given the start, stride and max value for
  // the end bound of the loop (RHS), and the fact that IV does not overflow
  // (which is checked above).
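  //
  // For illustration (hypothetical i8 ranges, unsigned case): with
  // MinStart == 0, MaxEnd == 100, and MinStride == 4, computeMaxBECountForLT
  // returns ceil((100 - 0) /u 4) == 25 as the maximum backedge-taken count.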
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }

  // We use the expression (max(End,Start)-Start)/Stride to describe the
  // backedge count: if the backedge is taken at least once, max(End,Start)
  // is End and the result is as above; if not, max(End,Start) is Start and
  // we get a backedge count of zero.
  const SCEV *BECount = nullptr;
  auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
  assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
  // Can we prove max(RHS,Start) > Start - Stride?
  if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
      isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
    // In this case, we can use a refined formula for computing backedge
    // taken count. The general formula remains:
    //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
    // We want to use the alternate formula:
    //   "((End - 1) - (Start - Stride)) /u Stride"
    // Let's do a quick case analysis to show these are equivalent under
    // our precondition that max(RHS,Start) > Start - Stride.
    // * For RHS <= Start, the backedge-taken count must be zero.
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
    //   "(Stride - 1) /u Stride" which is indeed zero for all non-zero values
    //   of Stride. For 0 stride, we've used umax(1, Stride) above, reducing
    //   this to the stride of 1 case.
    // * For RHS >= Start, the backedge count must be "RHS-Start /uceil
    //   Stride". "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((RHS - 1) - (Start - Stride)) /u Stride" which reassociates to
    //   "(RHS - (Start - Stride) - 1) /u Stride".
    //   Our preconditions trivially imply no overflow in that form.
    const SCEV *MinusOne = getMinusOne(Stride->getType());
    const SCEV *Numerator =
        getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
    BECount = getUDivExpr(Numerator, Stride);
  }

  const SCEV *BECountIfBackedgeTaken = nullptr;
  if (!BECount) {
    auto canProveRHSGreaterThanEqualStart = [&]() {
      auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
        return true;

      // (RHS > Start - 1) implies RHS >= Start.
      // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
      //   "Start - 1" doesn't overflow.
      // * For signed comparison, if Start - 1 does overflow, it's equal
      //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
      // * For unsigned comparison, if Start - 1 does overflow, it's equal
      //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
      //
      // FIXME: Should isLoopEntryGuardedByCond do this for us?
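      //
      // For illustration (unsigned case, hypothetical i8 values): if
      // Start == 0, then Start - 1 wraps to 255 and "RHS >u 255" can never
      // be proven, so this route simply fails to conclude RHS >= Start,
      // which is conservative but safe.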
      auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      auto *StartMinusOne =
          getAddExpr(OrigStart, getMinusOne(OrigStart->getType()));
      return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
    };

    // If we know that RHS >= Start in the context of loop, then we know that
    // max(RHS, Start) = RHS at this point.
    const SCEV *End;
    if (canProveRHSGreaterThanEqualStart()) {
      End = RHS;
    } else {
      // If RHS < Start, the backedge will be taken zero times. So in
      // general, we can write the backedge-taken count as:
      //
      //     RHS >= Start ? ceil(RHS - Start) / Stride : 0
      //
      // We convert it to the following to make it more convenient for SCEV:
      //
      //     ceil(max(RHS, Start) - Start) / Stride
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

      // See what would happen if we assume the backedge is taken. This is
      // used to compute MaxBECount.
      BECountIfBackedgeTaken =
          getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
    }

    // At this point, we know:
    //
    // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
    // 2. The index variable doesn't overflow.
    //
    // Therefore, we know N exists such that
    // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
    // doesn't overflow.
    //
    // Using this information, try to prove whether the addition in
    // "(Start - End) + (Stride - 1)" has unsigned overflow.
    const SCEV *One = getOne(Stride->getType());
    bool MayAddOverflow = [&] {
      if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
        if (StrideC->getAPInt().isPowerOf2()) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers. Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
          // Therefore, UMAX mod Stride == Stride - 1. So we can write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride + 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride + 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
          // use signed max instead of unsigned max. Note that we're trying
          // to prove a lack of unsigned overflow in either case.
          return false;
        }
      }
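      // For illustration (hypothetical i8 values): with Stride == 16,
      // UMAX == 255 and UMAX mod 16 == 15, so the largest multiple of 16 is
      // 240 == UMAX - 16 + 1, and (End - Start) + 15 <= 255 follows.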
      if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
        // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
        // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
        // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
        //
        // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
        return false;
      }
      return true;
    }();

    const SCEV *Delta = getMinusSCEV(End, Start);
    if (!MayAddOverflow) {
      // floor((D + (S - 1)) / S)
      // We prefer this formulation if it's legal because it's fewer
      // operations.
      BECount =
          getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
    } else {
      BECount = getUDivCeilSCEV(Delta, Stride);
    }
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount)) {
    MaxBECount = BECount;
  } else if (BECountIfBackedgeTaken &&
             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing us to optimize in the presence
  // of undefined behavior, as in C.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
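  // For illustration (hypothetical values): for {10,+,-3} >s 0, Stride is 3
  // and the exact backedge-taken count computed below is
  // ((10 - 0) + (3 - 1)) /u 3 == 4, matching the IV sequence 10, 7, 4, 1, -2.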
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (End->getType()->isPointerTy()) {
    End = getLosslessPtrToIntExpr(End);
    if (isa<SCEVCouldNotCompute>(End))
      return End;
  }

  // Compute ((Start - End) + (Stride - 1)) / Stride.
  // FIXME: This can overflow. Holding off on fixing this for now;
  // howManyGreaterThans will hopefully be gone soon.
  const SCEV *One = getOne(Stride->getType());
  const SCEV *BECount = getUDivExpr(
      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
                                                 getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
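  // For illustration (hypothetical values): {0,+,1} against the range
  // [5, 10) exits immediately, because the start value 0 is already outside
  // the range, giving an iteration count of zero.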
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
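// For illustration (hypothetical chrec): the post-increment form of
// {0,+,1,+,2} (values 0, 1, 4, 9, ...) is {0+1,+,1+2,+,2} == {1,+,3,+,2}
// (values 1, 4, 9, 16, ...), i.e. the same sequence shifted by one step.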
// Return true when S contains at least one undef value.
bool ScalarEvolution::containsUndefs(const SCEV *S) const {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->users());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    llvm::append_range(Worklist, U->users());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      BECountUsers(std::move(Arg.BECountUsers)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      SCEVUsers(std::move(Arg.SCEVUsers)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}

ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L))
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
  else
    OS << "Unpredictable backedge-taken count.\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << "  exit count for " << ExitingBlock->getName() << ": "
         << *SE->getExitCount(L, ExitingBlock) << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is "
       << *SE->getConstantMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
"; 12580 } 12581 OS << "\n"; 12582 12583 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12584 OS << "Loop "; 12585 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12586 OS << ": "; 12587 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12588 } 12589 } 12590 12591 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12592 switch (LD) { 12593 case ScalarEvolution::LoopVariant: 12594 return "Variant"; 12595 case ScalarEvolution::LoopInvariant: 12596 return "Invariant"; 12597 case ScalarEvolution::LoopComputable: 12598 return "Computable"; 12599 } 12600 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12601 } 12602 12603 void ScalarEvolution::print(raw_ostream &OS) const { 12604 // ScalarEvolution's implementation of the print method is to print 12605 // out SCEV values of all instructions that are interesting. Doing 12606 // this potentially causes it to create new SCEV objects though, 12607 // which technically conflicts with the const qualifier. This isn't 12608 // observable from outside the class though, so casting away the 12609 // const isn't dangerous. 12610 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12611 12612 if (ClassifyExpressions) { 12613 OS << "Classifying expressions for: "; 12614 F.printAsOperand(OS, /*PrintType=*/false); 12615 OS << "\n"; 12616 for (Instruction &I : instructions(F)) 12617 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12618 OS << I << '\n'; 12619 OS << " --> "; 12620 const SCEV *SV = SE.getSCEV(&I); 12621 SV->print(OS); 12622 if (!isa<SCEVCouldNotCompute>(SV)) { 12623 OS << " U: "; 12624 SE.getUnsignedRange(SV).print(OS); 12625 OS << " S: "; 12626 SE.getSignedRange(SV).print(OS); 12627 } 12628 12629 const Loop *L = LI.getLoopFor(I.getParent()); 12630 12631 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12632 if (AtUse != SV) { 12633 OS << " --> "; 12634 AtUse->print(OS); 12635 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12636 OS << " U: "; 12637 SE.getUnsignedRange(AtUse).print(OS); 12638 OS << " S: "; 12639 SE.getSignedRange(AtUse).print(OS); 12640 } 12641 } 12642 12643 if (L) { 12644 OS << "\t\t" "Exits: "; 12645 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12646 if (!SE.isLoopInvariant(ExitValue, L)) { 12647 OS << "<<Unknown>>"; 12648 } else { 12649 OS << *ExitValue; 12650 } 12651 12652 bool First = true; 12653 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12654 if (First) { 12655 OS << "\t\t" "LoopDispositions: { "; 12656 First = false; 12657 } else { 12658 OS << ", "; 12659 } 12660 12661 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12662 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12663 } 12664 12665 for (auto *InnerL : depth_first(L)) { 12666 if (InnerL == L) 12667 continue; 12668 if (First) { 12669 OS << "\t\t" "LoopDispositions: { "; 12670 First = false; 12671 } else { 12672 OS << ", "; 12673 } 12674 12675 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12676 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12677 } 12678 12679 OS << " }"; 12680 } 12681 12682 OS << "\n"; 12683 } 12684 } 12685 12686 OS << "Determining loop execution counts for: "; 12687 F.printAsOperand(OS, /*PrintType=*/false); 12688 OS << "\n"; 12689 for (Loop *I : LI) 12690 PrintLoopInfo(OS, &SE, I); 12691 } 12692 12693 ScalarEvolution::LoopDisposition 12694 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12695 auto &Values = LoopDispositions[S]; 12696 for (auto 
&V : Values) { 12697 if (V.getPointer() == L) 12698 return V.getInt(); 12699 } 12700 Values.emplace_back(L, LoopVariant); 12701 LoopDisposition D = computeLoopDisposition(S, L); 12702 auto &Values2 = LoopDispositions[S]; 12703 for (auto &V : llvm::reverse(Values2)) { 12704 if (V.getPointer() == L) { 12705 V.setInt(D); 12706 break; 12707 } 12708 } 12709 return D; 12710 } 12711 12712 ScalarEvolution::LoopDisposition 12713 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12714 switch (S->getSCEVType()) { 12715 case scConstant: 12716 return LoopInvariant; 12717 case scPtrToInt: 12718 case scTruncate: 12719 case scZeroExtend: 12720 case scSignExtend: 12721 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12722 case scAddRecExpr: { 12723 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12724 12725 // If L is the addrec's loop, it's computable. 12726 if (AR->getLoop() == L) 12727 return LoopComputable; 12728 12729 // Add recurrences are never invariant in the function-body (null loop). 12730 if (!L) 12731 return LoopVariant; 12732 12733 // Everything that is not defined at loop entry is variant. 12734 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12735 return LoopVariant; 12736 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12737 " dominate the contained loop's header?"); 12738 12739 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12740 if (AR->getLoop()->contains(L)) 12741 return LoopInvariant; 12742 12743 // This recurrence is variant w.r.t. L if any of its operands 12744 // are variant. 12745 for (auto *Op : AR->operands()) 12746 if (!isLoopInvariant(Op, L)) 12747 return LoopVariant; 12748 12749 // Otherwise it's loop-invariant. 12750 return LoopInvariant; 12751 } 12752 case scAddExpr: 12753 case scMulExpr: 12754 case scUMaxExpr: 12755 case scSMaxExpr: 12756 case scUMinExpr: 12757 case scSMinExpr: { 12758 bool HasVarying = false; 12759 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12760 LoopDisposition D = getLoopDisposition(Op, L); 12761 if (D == LoopVariant) 12762 return LoopVariant; 12763 if (D == LoopComputable) 12764 HasVarying = true; 12765 } 12766 return HasVarying ? LoopComputable : LoopInvariant; 12767 } 12768 case scUDivExpr: { 12769 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12770 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12771 if (LD == LoopVariant) 12772 return LoopVariant; 12773 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12774 if (RD == LoopVariant) 12775 return LoopVariant; 12776 return (LD == LoopInvariant && RD == LoopInvariant) ? 12777 LoopInvariant : LoopComputable; 12778 } 12779 case scUnknown: 12780 // All non-instruction values are loop invariant. All instructions are loop 12781 // invariant if they are not contained in the specified loop. 12782 // Instructions are never considered invariant in the function body 12783 // (null loop) because they are defined within the "loop". 12784 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12785 return (L && !L->contains(I)) ? 
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : llvm::reverse(Values2)) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}

ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
        ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
                                                bool Predicated) {
  auto &BECounts =
      Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
  auto It = BECounts.find(L);
  if (It != BECounts.end()) {
    for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
      if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
        auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
        assert(UserIt != BECountUsers.end());
        UserIt->second.erase({L, Predicated});
      }
    }
    BECounts.erase(It);
  }
}

void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
  SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end());
  SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());

  while (!Worklist.empty()) {
    const SCEV *Curr = Worklist.pop_back_val();
    auto Users = SCEVUsers.find(Curr);
    if (Users != SCEVUsers.end())
      for (auto *User : Users->second)
        if (ToForget.insert(User).second)
          Worklist.push_back(User);
  }

  for (auto *S : ToForget)
    forgetMemoizedResultsImpl(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (ToForget.count(Entry.first))
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }
}

void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  auto ExprIt = ExprValueMap.find(S);
  if (ExprIt != ExprValueMap.end()) {
    for (auto &ValueAndOffset : ExprIt->second) {
      if (ValueAndOffset.second == nullptr) {
        auto ValueIt = ValueExprMap.find_as(ValueAndOffset.first);
        if (ValueIt != ValueExprMap.end())
          ValueExprMap.erase(ValueIt);
      }
    }
    ExprValueMap.erase(ExprIt);
  }

  auto ScopeIt = ValuesAtScopes.find(S);
  if (ScopeIt != ValuesAtScopes.end()) {
    for (const auto &Pair : ScopeIt->second)
      if (!isa_and_nonnull<SCEVConstant>(Pair.second))
        erase_value(ValuesAtScopesUsers[Pair.second],
                    std::make_pair(Pair.first, S));
    ValuesAtScopes.erase(ScopeIt);
  }

  auto ScopeUserIt = ValuesAtScopesUsers.find(S);
  if (ScopeUserIt != ValuesAtScopesUsers.end()) {
    for (const auto &Pair : ScopeUserIt->second)
      erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
    ValuesAtScopesUsers.erase(ScopeUserIt);
  }

  auto BEUsersIt = BECountUsers.find(S);
  if (BEUsersIt != BECountUsers.end()) {
    // Work on a copy, as forgetBackedgeTakenCounts() will modify the original.
    auto Copy = BEUsersIt->second;
    for (const auto &Pair : Copy)
      forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
    BECountUsers.erase(BEUsersIt);
  }
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.contains(L))
      continue;
    ValidLoops.insert(L);
    Worklist.append(L->begin(), L->end());
  }
  for (auto &KV : ValueExprMap) {
#ifndef NDEBUG
    // Check for SCEV expressions referencing invalid/deleted loops.
    if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
      assert(ValidLoops.contains(AR->getLoop()) &&
             "AddRec references invalid loop");
    }
#endif

    // Check that the value is also part of the reverse map.
    auto It = ExprValueMap.find(KV.second);
    if (It == ExprValueMap.end() || !It->second.contains({KV.first, nullptr})) {
      dbgs() << "Value " << *KV.first
             << " is in ValueExprMap but not in ExprValueMap\n";
      std::abort();
    }
  }

  for (const auto &KV : ExprValueMap) {
    for (const auto &ValueAndOffset : KV.second) {
      if (ValueAndOffset.second != nullptr)
        continue;

      auto It = ValueExprMap.find_as(ValueAndOffset.first);
      if (It == ValueExprMap.end()) {
        dbgs() << "Value " << *ValueAndOffset.first
               << " is in ExprValueMap but not in ValueExprMap\n";
        std::abort();
      }
      if (It->second != KV.first) {
        dbgs() << "Value " << *ValueAndOffset.first
               << " mapped to " << *It->second
               << " rather than " << *KV.first << "\n";
        std::abort();
      }
    }
  }

  // Verify integrity of SCEV users.
  for (const auto &S : UniqueSCEVs) {
    SmallVector<const SCEV *, 4> Ops;
    collectUniqueOps(&S, Ops);
    for (const auto *Op : Ops) {
      // We do not store dependencies of constants.
      if (isa<SCEVConstant>(Op))
        continue;
      auto It = SCEVUsers.find(Op);
      if (It != SCEVUsers.end() && It->second.count(&S))
        continue;
      dbgs() << "Use of operand " << *Op << " by user " << S
             << " is not being tracked!\n";
      std::abort();
    }
  }

  // Verify integrity of ValuesAtScopes users.
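  // For illustration (hypothetical entries): if ValuesAtScopes[S] contains
  // the pair (L, V) for a non-constant V, then ValuesAtScopesUsers[V] must
  // contain the pair (L, S); the two loops below check both directions of
  // that correspondence.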
  for (const auto &ValueAndVec : ValuesAtScopes) {
    const SCEV *Value = ValueAndVec.first;
    for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
      const Loop *L = LoopAndValueAtScope.first;
      const SCEV *ValueAtScope = LoopAndValueAtScope.second;
      if (!isa<SCEVConstant>(ValueAtScope)) {
        auto It = ValuesAtScopesUsers.find(ValueAtScope);
        if (It != ValuesAtScopesUsers.end() &&
            is_contained(It->second, std::make_pair(L, Value)))
          continue;
        dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
               << *ValueAtScope << " missing in ValuesAtScopesUsers\n";
        std::abort();
      }
    }
  }

  for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
    const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
    for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
      const Loop *L = LoopAndValue.first;
      const SCEV *Value = LoopAndValue.second;
      assert(!isa<SCEVConstant>(Value));
      auto It = ValuesAtScopes.find(Value);
      if (It != ValuesAtScopes.end() &&
          is_contained(It->second, std::make_pair(L, ValueAtScope)))
        continue;
      dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
             << *ValueAtScope << " missing in ValuesAtScopes\n";
      std::abort();
    }
  }

  // Verify integrity of BECountUsers.
  auto VerifyBECountUsers = [&](bool Predicated) {
    auto &BECounts =
        Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
    for (const auto &LoopAndBEInfo : BECounts) {
      for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
        if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
          auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
          if (UserIt != BECountUsers.end() &&
              UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
            continue;
          dbgs() << "Value " << *ENT.ExactNotTaken << " for loop "
                 << *LoopAndBEInfo.first << " missing from BECountUsers\n";
          std::abort();
        }
      }
    }
  };
  VerifyBECountUsers(/* Predicated */ false);
  VerifyBECountUsers(/* Predicated */ true);
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).verify();
  return PreservedAnalyses::all();
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  // For compatibility with opt's -analyze feature under legacy pass manager
  // which was not ported to NPM. This keeps tests using
  // update_analyze_test_checks.py working.
  OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
     << F.getName() << "':\n";
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments.
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}

namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
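
// Illustrative sketch (assumes SE, L, and a SCEVable Value V are already in
// scope): a client that needs an AddRec even when the no-wrap facts are not
// statically known can collect the required runtime predicates like so.
//
//   SmallPtrSet<const SCEVPredicate *, 4> Preds;
//   if (const SCEVAddRecExpr *AR =
//           SE.convertSCEVToAddRecWithPredicates(SE.getSCEV(V), L, Preds)) {
//     // AR is only valid under every predicate now in Preds; the client
//     // must emit the corresponding runtime checks before relying on it.
//   }
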
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
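
// Worked example (illustrative): for an AddRec {0,+,1}<nuw><nsw> the NSW flag
// transfers as IncrementNSSW, and because the constant step 1 is non-negative
// the NUW flag additionally implies IncrementNUSW, so getImpliedFlags returns
// IncrementNUSW | IncrementNSSW. For {0,+,1} with no flags it returns
// IncrementAnyWrap, i.e. nothing is statically implied.
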
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

void ScalarEvolution::registerUser(const SCEV *User,
                                   ArrayRef<const SCEV *> Ops) {
  for (auto *Op : Ops)
    // We do not expect that forgetting cached data for SCEVConstants will ever
    // open any prospects for sharpening or introduce any correctness issues,
    // so we don't bother storing their dependencies.
    if (!isa<SCEVConstant>(Op))
      SCEVUsers[Op].insert(User);
}
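
// Illustrative note: because add() first checks implies(), inserting the same
// predicate twice, or a strictly weaker one, leaves the union unchanged. For
// example, after adding a wrap predicate for some AddRec with <nusw><nssw>,
// adding a predicate for the same AddRec with just <nusw> is a no-op, since
// SCEVWrapPredicate::implies() treats a superset of flags as stronger.
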
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
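
// Illustrative sketch (assumes SE and L are in scope, and Ptr is a Value whose
// evolution we want in AddRec form): clients such as vectorizers typically use
// PredicatedScalarEvolution to force AddRec form and later version the loop on
// the accumulated predicates.
//
//   PredicatedScalarEvolution PSE(SE, L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is valid only under PSE.getUnionPredicate(); if that union is not
//     // always true, the client must emit the corresponding runtime checks.
//     if (!PSE.getUnionPredicate().isAlwaysTrue()) {
//       // ... generate runtime checks before relying on AR ...
//     }
//   }
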
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (e.g., if A is X / 2 and B
// is 4, A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}
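
// Worked example (illustrative): 'urem i32 %x, 8' is represented by SCEV in
// the power-of-2 form (zext i3 (trunc i32 %x to i3) to i32), which the first
// pattern above matches with LHS = %x and RHS = 8 (i.e. 1 << 3). A
// non-power-of-2 remainder such as 'urem i32 %x, 6' instead appears as an
// add/mul form like ((-6 * (%x /u 6)) + %x), which MatchURemWithDivisor
// recognizes because rebuilding getURemExpr(%x, 6) folds back to the same
// uniqued expression.
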
const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}
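
// Illustrative example: for a loop with two exiting blocks, one whose exact
// exit count is %n and one whose exact count is unknown but whose constant
// maximum is 100, the symbolic maximum backedge-taken count becomes
// umin(%n, 100) via getUMinFromMismatchedTypes.
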
/// A rewriter to replace SCEV expressions in Map with the corresponding entry
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
///
/// At the moment only rewriting SCEVUnknown and SCEVZeroExtendExpr is
/// supported.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  const DenseMap<const SCEV *, const SCEV *> &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE,
                        DenseMap<const SCEV *, const SCEV *> &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return Expr;
    return I->second;
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
          Expr);
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  SmallVector<const SCEV *> ExprsToRewrite;
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS,
                              DenseMap<const SCEV *, const SCEV *>
                                  &RewriteMap) {
    // WARNING: It is generally unsound to apply any wrap flags to the proposed
    // replacement SCEV which isn't directly implied by the structure of that
    // SCEV. In particular, using contextual facts to imply flags is *NOT*
    // legal. See the scoping rules for flags in the header to understand why.

    // If LHS is a constant, apply information to the other expression.
    if (isa<SCEVConstant>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X >=u C1).
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
                                 &ExprsToRewrite]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown);
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      ExprsToRewrite.push_back(LHSUnknown);
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // If we have LHS == 0, check if LHS is computing a property of some
    // unknown SCEV %v, which we can then rewrite %v to express explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          auto Multiple = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS);
          RewriteMap[LHSUnknown] = Multiple;
          ExprsToRewrite.push_back(LHSUnknown);
          return;
        }
      }
    }

    // Do not apply information for constants or if RHS contains an AddRec.
    if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
      return;

    // If RHS is SCEVUnknown, make sure the information is applied to it.
    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Limit to expressions that can be rewritten.
    if (!isa<SCEVUnknown>(LHS) && !isa<SCEVZeroExtendExpr>(LHS))
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHS);
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;

    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }

    if (RewrittenRHS) {
      RewriteMap[LHS] = RewrittenRHS;
      if (LHS == RewrittenLHS)
        ExprsToRewrite.push_back(LHS);
    }
  };
  // First, collect conditions from dominating branches. Starting at the loop
  // predecessor, climb up the predecessor chain, as long as each predecessor
  // found has a unique successor leading to the original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  SmallVector<std::pair<Value *, bool>> Terms;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    Terms.emplace_back(LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) == Pair.second);
  }

  // Now apply the information from the collected conditions to RewriteMap.
  // Conditions are processed in reverse order, so the earliest condition is
  // processed first. This ensures the SCEVs with the shortest dependency
  // chains are constructed first.
  DenseMap<const SCEV *, const SCEV *> RewriteMap;
  for (auto &E : reverse(Terms)) {
    bool EnterIfTrue = E.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(E.first);
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                         getSCEV(Cmp->getOperand(1)), RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;

  // Now that all rewrite information is collected, rewrite the collected
  // expressions with the information in the map. This applies information to
  // sub-expressions.
  if (ExprsToRewrite.size() > 1) {
    for (const SCEV *Expr : ExprsToRewrite) {
      const SCEV *RewriteTo = RewriteMap[Expr];
      RewriteMap.erase(Expr);
      SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
      RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
    }
  }

  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
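
// Worked example (illustrative): if loop entry is guarded by
// 'icmp ult i32 %n, 16', CollectCondition records the mapping
// %n -> (15 umin %n) in RewriteMap (ICMP_ULT maps to umin with RHS - 1
// above), and applyLoopGuards(%n, L) returns (15 umin %n). This lets callers
// derive tighter trip-count bounds than the unguarded %n would allow.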