//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by brute force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions",
                        cl::Hidden, cl::init(true),
                        cl::desc("When printing analysis, include information "
                                 "on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return None, assuming we do not know
// if they are equivalent for sure.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
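  // Illustrative note (added commentary, not from the original source):
  // because GroupByComplexity sorts operands with this comparator before
  // expressions are uniqued, getAddExpr(a, b) and getAddExpr(b, a) produce
  // identically ordered operand lists and therefore intern to the same SCEV
  // node, which is what makes pointer equality between SCEVs meaningful.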
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
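  // Worked example (added commentary, not from the original source): for
  // K = 4, K! = 24 = 2^3 * 3. T starts at 1 (the factor of two contributed by
  // 2!); the loop below adds nothing for i = 3 (odd) and two more for i = 4,
  // giving T = 3 and OddFactorial = 3. The product is then shifted right by
  // T = 3 and multiplied by the modular inverse of 3 to divide by the odd
  // part exactly.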
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
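  // For example (added commentary, not from the original source):
  // trunc i64 (42 + (zext i32 %x to i64)) to i32 distributes to (42 + %x):
  // the constant folds and trunc(zext(%x)) collapses back to %x, so no new
  // truncate survives the transform and the rewrite is a clear win.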
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked at the beginning that ID is not in the cache, it is
    // possible that the recursive modifications above inserted it. So if we
    // find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
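// Illustrative example (added commentary, not from the original source): with
// an 8-bit step whose unsigned range is at most [0, 4], the limit computed
// below is 0 - 4 == 252 under wraparound arithmetic and the predicate is ULT;
// any recurrence value that is u< 252 can be incremented by the step without
// unsigned wrap.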
1297 static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step, 1298 ICmpInst::Predicate *Pred, 1299 ScalarEvolution *SE) { 1300 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType()); 1301 *Pred = ICmpInst::ICMP_ULT; 1302 1303 return SE->getConstant(APInt::getMinValue(BitWidth) - 1304 SE->getUnsignedRangeMax(Step)); 1305 } 1306 1307 namespace { 1308 1309 struct ExtendOpTraitsBase { 1310 typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *, 1311 unsigned); 1312 }; 1313 1314 // Used to make code generic over signed and unsigned overflow. 1315 template <typename ExtendOp> struct ExtendOpTraits { 1316 // Members present: 1317 // 1318 // static const SCEV::NoWrapFlags WrapType; 1319 // 1320 // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr; 1321 // 1322 // static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1323 // ICmpInst::Predicate *Pred, 1324 // ScalarEvolution *SE); 1325 }; 1326 1327 template <> 1328 struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase { 1329 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW; 1330 1331 static const GetExtendExprTy GetExtendExpr; 1332 1333 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1334 ICmpInst::Predicate *Pred, 1335 ScalarEvolution *SE) { 1336 return getSignedOverflowLimitForStep(Step, Pred, SE); 1337 } 1338 }; 1339 1340 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1341 SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr; 1342 1343 template <> 1344 struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase { 1345 static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW; 1346 1347 static const GetExtendExprTy GetExtendExpr; 1348 1349 static const SCEV *getOverflowLimitForStep(const SCEV *Step, 1350 ICmpInst::Predicate *Pred, 1351 ScalarEvolution *SE) { 1352 return getUnsignedOverflowLimitForStep(Step, Pred, SE); 1353 } 1354 }; 1355 1356 const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits< 1357 SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr; 1358 1359 } // end anonymous namespace 1360 1361 // The recurrence AR has been shown to have no signed/unsigned wrap or something 1362 // close to it. Typically, if we can prove NSW/NUW for AR, then we can just as 1363 // easily prove NSW/NUW for its preincrement or postincrement sibling. This 1364 // allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step + 1365 // Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the 1366 // expression "Step + sext/zext(PreIncAR)" is congruent with 1367 // "sext/zext(PostIncAR)" 1368 template <typename ExtendOpTy> 1369 static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, 1370 ScalarEvolution *SE, unsigned Depth) { 1371 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1372 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1373 1374 const Loop *L = AR->getLoop(); 1375 const SCEV *Start = AR->getStart(); 1376 const SCEV *Step = AR->getStepRecurrence(*SE); 1377 1378 // Check for a simple looking step prior to loop entry. 1379 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); 1380 if (!SA) 1381 return nullptr; 1382 1383 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV 1384 // subtraction is expensive. For this purpose, perform a quick and dirty 1385 // difference, by checking for Step in the operand list. 
1386 SmallVector<const SCEV *, 4> DiffOps; 1387 for (const SCEV *Op : SA->operands()) 1388 if (Op != Step) 1389 DiffOps.push_back(Op); 1390 1391 if (DiffOps.size() == SA->getNumOperands()) 1392 return nullptr; 1393 1394 // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` + 1395 // `Step`: 1396 1397 // 1. NSW/NUW flags on the step increment. 1398 auto PreStartFlags = 1399 ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); 1400 const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); 1401 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( 1402 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); 1403 1404 // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies 1405 // "S+X does not sign/unsign-overflow". 1406 // 1407 1408 const SCEV *BECount = SE->getBackedgeTakenCount(L); 1409 if (PreAR && PreAR->getNoWrapFlags(WrapType) && 1410 !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount)) 1411 return PreStart; 1412 1413 // 2. Direct overflow check on the step operation's expression. 1414 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType()); 1415 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2); 1416 const SCEV *OperandExtendedStart = 1417 SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth), 1418 (SE->*GetExtendExpr)(Step, WideTy, Depth)); 1419 if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) { 1420 if (PreAR && AR->getNoWrapFlags(WrapType)) { 1421 // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW 1422 // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then 1423 // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact. 1424 SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType); 1425 } 1426 return PreStart; 1427 } 1428 1429 // 3. Loop precondition. 1430 ICmpInst::Predicate Pred; 1431 const SCEV *OverflowLimit = 1432 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE); 1433 1434 if (OverflowLimit && 1435 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) 1436 return PreStart; 1437 1438 return nullptr; 1439 } 1440 1441 // Get the normalized zero or sign extended expression for this AddRec's Start. 1442 template <typename ExtendOpTy> 1443 static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, 1444 ScalarEvolution *SE, 1445 unsigned Depth) { 1446 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr; 1447 1448 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth); 1449 if (!PreStart) 1450 return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth); 1451 1452 return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty, 1453 Depth), 1454 (SE->*GetExtendExpr)(PreStart, Ty, Depth)); 1455 } 1456 1457 // Try to prove away overflow by looking at "nearby" add recurrences. A 1458 // motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it 1459 // does not itself wrap then we can conclude that `{1,+,4}` is `nuw`. 1460 // 1461 // Formally: 1462 // 1463 // {S,+,X} == {S-T,+,X} + T 1464 // => Ext({S,+,X}) == Ext({S-T,+,X} + T) 1465 // 1466 // If ({S-T,+,X} + T) does not overflow ... (1) 1467 // 1468 // RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T) 1469 // 1470 // If {S-T,+,X} does not overflow ... (2) 1471 // 1472 // RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T) 1473 // == {Ext(S-T)+Ext(T),+,Ext(X)} 1474 // 1475 // If (S-T)+T does not overflow ... 
(3) 1476 // 1477 // RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)} 1478 // == {Ext(S),+,Ext(X)} == LHS 1479 // 1480 // Thus, if (1), (2) and (3) are true for some T, then 1481 // Ext({S,+,X}) == {Ext(S),+,Ext(X)} 1482 // 1483 // (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T) 1484 // does not overflow" restricted to the 0th iteration. Therefore we only need 1485 // to check for (1) and (2). 1486 // 1487 // In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T 1488 // is `Delta` (defined below). 1489 template <typename ExtendOpTy> 1490 bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start, 1491 const SCEV *Step, 1492 const Loop *L) { 1493 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType; 1494 1495 // We restrict `Start` to a constant to prevent SCEV from spending too much 1496 // time here. It is correct (but more expensive) to continue with a 1497 // non-constant `Start` and do a general SCEV subtraction to compute 1498 // `PreStart` below. 1499 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); 1500 if (!StartC) 1501 return false; 1502 1503 APInt StartAI = StartC->getAPInt(); 1504 1505 for (unsigned Delta : {-2, -1, 1, 2}) { 1506 const SCEV *PreStart = getConstant(StartAI - Delta); 1507 1508 FoldingSetNodeID ID; 1509 ID.AddInteger(scAddRecExpr); 1510 ID.AddPointer(PreStart); 1511 ID.AddPointer(Step); 1512 ID.AddPointer(L); 1513 void *IP = nullptr; 1514 const auto *PreAR = 1515 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1516 1517 // Give up if we don't already have the add recurrence we need because 1518 // actually constructing an add recurrence is relatively expensive. 1519 if (PreAR && PreAR->getNoWrapFlags(WrapType)) { // proves (2) 1520 const SCEV *DeltaS = getConstant(StartC->getType(), Delta); 1521 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1522 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep( 1523 DeltaS, &Pred, this); 1524 if (Limit && isKnownPredicate(Pred, PreAR, Limit)) // proves (1) 1525 return true; 1526 } 1527 } 1528 1529 return false; 1530 } 1531 1532 // Finds an integer D for an expression (C + x + y + ...) such that the top 1533 // level addition in (D + (C - D + x + y + ...)) would not wrap (signed or 1534 // unsigned) and the number of trailing zeros of (C - D + x + y + ...) is 1535 // maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and 1536 // the (C + x + y + ...) expression is \p WholeAddExpr. 1537 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1538 const SCEVConstant *ConstantTerm, 1539 const SCEVAddExpr *WholeAddExpr) { 1540 const APInt &C = ConstantTerm->getAPInt(); 1541 const unsigned BitWidth = C.getBitWidth(); 1542 // Find number of trailing zeros of (x + y + ...) w/o the C first: 1543 uint32_t TZ = BitWidth; 1544 for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I) 1545 TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I))); 1546 if (TZ) { 1547 // Set D to be as many least significant bits of C as possible while still 1548 // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap: 1549 return TZ < BitWidth ? 
C.trunc(TZ).zext(BitWidth) : C; 1550 } 1551 return APInt(BitWidth, 0); 1552 } 1553 1554 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1555 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1556 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1557 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1558 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1559 const APInt &ConstantStart, 1560 const SCEV *Step) { 1561 const unsigned BitWidth = ConstantStart.getBitWidth(); 1562 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1563 if (TZ) 1564 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1565 : ConstantStart; 1566 return APInt(BitWidth, 0); 1567 } 1568 1569 const SCEV * 1570 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1571 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1572 "This is not an extending conversion!"); 1573 assert(isSCEVable(Ty) && 1574 "This is not a conversion to a SCEVable type!"); 1575 Ty = getEffectiveSCEVType(Ty); 1576 1577 // Fold if the operand is constant. 1578 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1579 return getConstant( 1580 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1581 1582 // zext(zext(x)) --> zext(x) 1583 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1584 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1585 1586 // Before doing any expensive analysis, check to see if we've already 1587 // computed a SCEV for this Op and Ty. 1588 FoldingSetNodeID ID; 1589 ID.AddInteger(scZeroExtend); 1590 ID.AddPointer(Op); 1591 ID.AddPointer(Ty); 1592 void *IP = nullptr; 1593 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1594 if (Depth > MaxCastDepth) { 1595 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1596 Op, Ty); 1597 UniqueSCEVs.InsertNode(S, IP); 1598 addToLoopUseLists(S); 1599 return S; 1600 } 1601 1602 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1603 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1604 // It's possible the bits taken off by the truncate were all zero bits. If 1605 // so, we should be able to simplify this further. 1606 const SCEV *X = ST->getOperand(); 1607 ConstantRange CR = getUnsignedRange(X); 1608 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1609 unsigned NewBits = getTypeSizeInBits(Ty); 1610 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1611 CR.zextOrTrunc(NewBits))) 1612 return getTruncateOrZeroExtend(X, Ty, Depth); 1613 } 1614 1615 // If the input value is a chrec scev, and we can prove that the value 1616 // did not overflow the old, smaller, value, we can zero extend all of the 1617 // operands (often constants). 
This allows analysis of something like
1618     // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1619   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1620     if (AR->isAffine()) {
1621       const SCEV *Start = AR->getStart();
1622       const SCEV *Step = AR->getStepRecurrence(*this);
1623       unsigned BitWidth = getTypeSizeInBits(AR->getType());
1624       const Loop *L = AR->getLoop();
1625 
1626       if (!AR->hasNoUnsignedWrap()) {
1627         auto NewFlags = proveNoWrapViaConstantRanges(AR);
1628         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1629       }
1630 
1631       // If we have special knowledge that this addrec won't overflow,
1632       // we don't need to do any further analysis.
1633       if (AR->hasNoUnsignedWrap())
1634         return getAddRecExpr(
1635             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1636             getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1637 
1638       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1639       // Note that this serves two purposes: It filters out loops that are
1640       // simply not analyzable, and it covers the case where this code is
1641       // being called from within backedge-taken count analysis, such that
1642       // attempting to ask for the backedge-taken count would likely result
1643       // in infinite recursion. In the latter case, the analysis code will
1644       // cope with a conservative value, and it will take care to purge
1645       // that value once it has finished.
1646       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1647       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1648         // Manually compute the final value for AR, checking for overflow.
1649 
1650         // Check whether the backedge-taken count can be losslessly cast to
1651         // the addrec's type. The count is always unsigned.
1652         const SCEV *CastedMaxBECount =
1653             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1654         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1655             CastedMaxBECount, MaxBECount->getType(), Depth);
1656         if (MaxBECount == RecastedMaxBECount) {
1657           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1658           // Check whether Start+Step*MaxBECount has no unsigned overflow.
1659           const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1660                                         SCEV::FlagAnyWrap, Depth + 1);
1661           const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1662                                                           SCEV::FlagAnyWrap,
1663                                                           Depth + 1),
1664                                                WideTy, Depth + 1);
1665           const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1666           const SCEV *WideMaxBECount =
1667               getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1668           const SCEV *OperandExtendedAdd =
1669               getAddExpr(WideStart,
1670                          getMulExpr(WideMaxBECount,
1671                                     getZeroExtendExpr(Step, WideTy, Depth + 1),
1672                                     SCEV::FlagAnyWrap, Depth + 1),
1673                          SCEV::FlagAnyWrap, Depth + 1);
1674           if (ZAdd == OperandExtendedAdd) {
1675             // Cache knowledge of AR NUW, which is propagated to this AddRec.
1676             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1677             // Return the expression with the addrec on the outside.
1678             return getAddRecExpr(
1679                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1680                                                          Depth + 1),
1681                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1682                 AR->getNoWrapFlags());
1683           }
1684           // Similar to above, only this time treat the step value as signed.
1685           // This covers loops that count down.
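          // For instance (illustrative): on i8, {10,+,-1} counts down from
          // 10. Reinterpreted unsigned, the step -1 is 255 and the zext-based
          // check above would appear to wrap, but widening the step with
          // sext keeps it at -1, so Start + Step * MaxBECount can still be
          // compared in the wider type.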
1686           OperandExtendedAdd =
1687               getAddExpr(WideStart,
1688                          getMulExpr(WideMaxBECount,
1689                                     getSignExtendExpr(Step, WideTy, Depth + 1),
1690                                     SCEV::FlagAnyWrap, Depth + 1),
1691                          SCEV::FlagAnyWrap, Depth + 1);
1692           if (ZAdd == OperandExtendedAdd) {
1693             // Cache knowledge of AR NW, which is propagated to this AddRec.
1694             // Negative step causes unsigned wrap, but it still can't self-wrap.
1695             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1696             // Return the expression with the addrec on the outside.
1697             return getAddRecExpr(
1698                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1699                                                          Depth + 1),
1700                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1701                 AR->getNoWrapFlags());
1702           }
1703         }
1704       }
1705 
1706       // Normally, in the cases we can prove no-overflow via a
1707       // backedge guarding condition, we can also compute a backedge
1708       // taken count for the loop.  The exceptions are assumptions and
1709       // guards present in the loop -- SCEV is not great at exploiting
1710       // these to compute max backedge taken counts, but can still use
1711       // these to prove lack of overflow.  Use this fact to avoid
1712       // doing extra work that may not pay off.
1713       if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1714           !AC.assumptions().empty()) {
1715 
1716         auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1717         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1718         if (AR->hasNoUnsignedWrap()) {
1719           // Same as nuw case above - duplicated here to avoid a compile time
1720           // issue. It's not clear that the order of checks matters, but it's
1721           // one of two possible causes for a change which was reverted. Be
1722           // conservative for the moment.
1723           return getAddRecExpr(
1724               getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1725                                                        Depth + 1),
1726               getZeroExtendExpr(Step, Ty, Depth + 1), L,
1727               AR->getNoWrapFlags());
1728         }
1729 
1730         // For a negative step, we can extend the operands iff doing so only
1731         // traverses values in the range zext([0,UINT_MAX]).
1732         if (isKnownNegative(Step)) {
1733           const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1734                                       getSignedRangeMin(Step));
1735           if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1736               isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1737             // Cache knowledge of AR NW, which is propagated to this
1738             // AddRec.  Negative step causes unsigned wrap, but it
1739             // still can't self-wrap.
1740             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1741             // Return the expression with the addrec on the outside.
1742             return getAddRecExpr(
1743                 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1744                                                          Depth + 1),
1745                 getSignExtendExpr(Step, Ty, Depth + 1), L,
1746                 AR->getNoWrapFlags());
1747           }
1748         }
1749       }
1750 
1751       // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1752       // if D + (C - D + Step * n) could be proven to not unsigned wrap
1753       // where D maximizes the number of trailing zeros of (C - D + Step * n)
1754       if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1755         const APInt &C = SC->getAPInt();
1756         const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1757         if (D != 0) {
1758           const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1759           const SCEV *SResidual =
1760               getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1761           const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1762           return getAddExpr(SZExtD, SZExtR,
1763                             (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1764                             Depth + 1);
1765         }
1766       }
1767 
1768       if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1769         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1770         return getAddRecExpr(
1771             getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1772             getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1773       }
1774     }
1775 
1776   // zext(A % B) --> zext(A) % zext(B)
1777   {
1778     const SCEV *LHS;
1779     const SCEV *RHS;
1780     if (matchURem(Op, LHS, RHS))
1781       return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1782                          getZeroExtendExpr(RHS, Ty, Depth + 1));
1783   }
1784 
1785   // zext(A / B) --> zext(A) / zext(B).
1786   if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1787     return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1788                        getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1789 
1790   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1791     // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1792     if (SA->hasNoUnsignedWrap()) {
1793       // If the addition does not unsign overflow then we can, by definition,
1794       // commute the zero extension with the addition operation.
1795       SmallVector<const SCEV *, 4> Ops;
1796       for (const auto *Op : SA->operands())
1797         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1798       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1799     }
1800 
1801     // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1802     // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1803     // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1804     //
1805     // Often address arithmetic contains expressions like
1806     // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1807     // This transformation is useful while proving that such expressions are
1808     // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
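    // Worked example (illustrative): for zext(5 + 4 * %x), the variable part
    // 4 * %x has two known trailing zero bits, so D is taken as the low two
    // bits of 5, i.e. D = 1, and the expression splits into
    // zext(1) + zext(4 + 4 * %x), where the residual 4 + 4 * %x is a
    // multiple of 4.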
1809 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1810 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1811 if (D != 0) { 1812 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1813 const SCEV *SResidual = 1814 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1815 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1816 return getAddExpr(SZExtD, SZExtR, 1817 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1818 Depth + 1); 1819 } 1820 } 1821 } 1822 1823 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1824 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1825 if (SM->hasNoUnsignedWrap()) { 1826 // If the multiply does not unsign overflow then we can, by definition, 1827 // commute the zero extension with the multiply operation. 1828 SmallVector<const SCEV *, 4> Ops; 1829 for (const auto *Op : SM->operands()) 1830 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1831 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1832 } 1833 1834 // zext(2^K * (trunc X to iN)) to iM -> 1835 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1836 // 1837 // Proof: 1838 // 1839 // zext(2^K * (trunc X to iN)) to iM 1840 // = zext((trunc X to iN) << K) to iM 1841 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1842 // (because shl removes the top K bits) 1843 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1844 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1845 // 1846 if (SM->getNumOperands() == 2) 1847 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1848 if (MulLHS->getAPInt().isPowerOf2()) 1849 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1850 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1851 MulLHS->getAPInt().logBase2(); 1852 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1853 return getMulExpr( 1854 getZeroExtendExpr(MulLHS, Ty), 1855 getZeroExtendExpr( 1856 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1857 SCEV::FlagNUW, Depth + 1); 1858 } 1859 } 1860 1861 // The cast wasn't folded; create an explicit cast node. 1862 // Recompute the insert position, as it may have been invalidated. 1863 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1864 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1865 Op, Ty); 1866 UniqueSCEVs.InsertNode(S, IP); 1867 addToLoopUseLists(S); 1868 return S; 1869 } 1870 1871 const SCEV * 1872 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1873 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1874 "This is not an extending conversion!"); 1875 assert(isSCEVable(Ty) && 1876 "This is not a conversion to a SCEVable type!"); 1877 Ty = getEffectiveSCEVType(Ty); 1878 1879 // Fold if the operand is constant. 1880 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1881 return getConstant( 1882 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1883 1884 // sext(sext(x)) --> sext(x) 1885 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1886 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1887 1888 // sext(zext(x)) --> zext(x) 1889 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1890 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1891 1892 // Before doing any expensive analysis, check to see if we've already 1893 // computed a SCEV for this Op and Ty. 
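  // (This lookup is what keeps SCEVs unique per (opcode, operand, type)
  // profile: if an equal node was interned earlier we return it instead of
  // allocating a duplicate, which is why pointer equality between SCEVs
  // implies structural equality.)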
1894 FoldingSetNodeID ID; 1895 ID.AddInteger(scSignExtend); 1896 ID.AddPointer(Op); 1897 ID.AddPointer(Ty); 1898 void *IP = nullptr; 1899 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1900 // Limit recursion depth. 1901 if (Depth > MaxCastDepth) { 1902 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1903 Op, Ty); 1904 UniqueSCEVs.InsertNode(S, IP); 1905 addToLoopUseLists(S); 1906 return S; 1907 } 1908 1909 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1910 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1911 // It's possible the bits taken off by the truncate were all sign bits. If 1912 // so, we should be able to simplify this further. 1913 const SCEV *X = ST->getOperand(); 1914 ConstantRange CR = getSignedRange(X); 1915 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1916 unsigned NewBits = getTypeSizeInBits(Ty); 1917 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1918 CR.sextOrTrunc(NewBits))) 1919 return getTruncateOrSignExtend(X, Ty, Depth); 1920 } 1921 1922 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1923 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1924 if (SA->hasNoSignedWrap()) { 1925 // If the addition does not sign overflow then we can, by definition, 1926 // commute the sign extension with the addition operation. 1927 SmallVector<const SCEV *, 4> Ops; 1928 for (const auto *Op : SA->operands()) 1929 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1930 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1931 } 1932 1933 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1934 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1935 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1936 // 1937 // For instance, this will bring two seemingly different expressions: 1938 // 1 + sext(5 + 20 * %x + 24 * %y) and 1939 // sext(6 + 20 * %x + 24 * %y) 1940 // to the same form: 1941 // 2 + sext(4 + 20 * %x + 24 * %y) 1942 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1943 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1944 if (D != 0) { 1945 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1946 const SCEV *SResidual = 1947 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1948 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1949 return getAddExpr(SSExtD, SSExtR, 1950 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1951 Depth + 1); 1952 } 1953 } 1954 } 1955 // If the input value is a chrec scev, and we can prove that the value 1956 // did not overflow the old, smaller, value, we can sign extend all of the 1957 // operands (often constants). This allows analysis of something like 1958 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1959 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1960 if (AR->isAffine()) { 1961 const SCEV *Start = AR->getStart(); 1962 const SCEV *Step = AR->getStepRecurrence(*this); 1963 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1964 const Loop *L = AR->getLoop(); 1965 1966 if (!AR->hasNoSignedWrap()) { 1967 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1968 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1969 } 1970 1971 // If we have special knowledge that this addrec won't overflow, 1972 // we don't need to do any further analysis. 
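      // For example (illustrative): for the loop
      //   for (signed char X = 0; X < 100; ++X) { int Y = X; }
      // the addrec for X is {0,+,1}<nsw> on i8, so the sext to i32 folds
      // directly to {0,+,1}<nsw> on i32 by extending the start and step.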
1973       if (AR->hasNoSignedWrap())
1974         return getAddRecExpr(
1975             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1976             getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1977 
1978       // Check whether the backedge-taken count is SCEVCouldNotCompute.
1979       // Note that this serves two purposes: It filters out loops that are
1980       // simply not analyzable, and it covers the case where this code is
1981       // being called from within backedge-taken count analysis, such that
1982       // attempting to ask for the backedge-taken count would likely result
1983       // in infinite recursion. In the latter case, the analysis code will
1984       // cope with a conservative value, and it will take care to purge
1985       // that value once it has finished.
1986       const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1987       if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1988         // Manually compute the final value for AR, checking for
1989         // overflow.
1990 
1991         // Check whether the backedge-taken count can be losslessly cast to
1992         // the addrec's type. The count is always unsigned.
1993         const SCEV *CastedMaxBECount =
1994             getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1995         const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1996             CastedMaxBECount, MaxBECount->getType(), Depth);
1997         if (MaxBECount == RecastedMaxBECount) {
1998           Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1999           // Check whether Start+Step*MaxBECount has no signed overflow.
2000           const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2001                                         SCEV::FlagAnyWrap, Depth + 1);
2002           const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2003                                                           SCEV::FlagAnyWrap,
2004                                                           Depth + 1),
2005                                                WideTy, Depth + 1);
2006           const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2007           const SCEV *WideMaxBECount =
2008               getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2009           const SCEV *OperandExtendedAdd =
2010               getAddExpr(WideStart,
2011                          getMulExpr(WideMaxBECount,
2012                                     getSignExtendExpr(Step, WideTy, Depth + 1),
2013                                     SCEV::FlagAnyWrap, Depth + 1),
2014                          SCEV::FlagAnyWrap, Depth + 1);
2015           if (SAdd == OperandExtendedAdd) {
2016             // Cache knowledge of AR NSW, which is propagated to this AddRec.
2017             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2018             // Return the expression with the addrec on the outside.
2019             return getAddRecExpr(
2020                 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2021                                                          Depth + 1),
2022                 getSignExtendExpr(Step, Ty, Depth + 1), L,
2023                 AR->getNoWrapFlags());
2024           }
2025           // Similar to above, only this time treat the step value as unsigned.
2026           // This covers loops that count up with an unsigned step.
2027           OperandExtendedAdd =
2028               getAddExpr(WideStart,
2029                          getMulExpr(WideMaxBECount,
2030                                     getZeroExtendExpr(Step, WideTy, Depth + 1),
2031                                     SCEV::FlagAnyWrap, Depth + 1),
2032                          SCEV::FlagAnyWrap, Depth + 1);
2033           if (SAdd == OperandExtendedAdd) {
2034             // If AR wraps around then
2035             //
2036             //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
2037             //    => SAdd != OperandExtendedAdd
2038             //
2039             // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2040             // (SAdd == OperandExtendedAdd => AR is NW)
2041 
2042             setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2043 
2044             // Return the expression with the addrec on the outside.
2045             return getAddRecExpr(
2046                 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2047                                                          Depth + 1),
2048                 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2049                 AR->getNoWrapFlags());
2050           }
2051         }
2052       }
2053 
2054       auto NewFlags = proveNoSignedWrapViaInduction(AR);
2055       setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2056       if (AR->hasNoSignedWrap()) {
2057         // Same as nsw case above - duplicated here to avoid a compile time
2058         // issue. It's not clear that the order of checks matters, but it's
2059         // one of two possible causes for a change which was reverted. Be
2060         // conservative for the moment.
2061         return getAddRecExpr(
2062             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2063             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2064       }
2065 
2066       // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2067       // if D + (C - D + Step * n) could be proven to not signed wrap
2068       // where D maximizes the number of trailing zeros of (C - D + Step * n)
2069       if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2070         const APInt &C = SC->getAPInt();
2071         const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2072         if (D != 0) {
2073           const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2074           const SCEV *SResidual =
2075               getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2076           const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2077           return getAddExpr(SSExtD, SSExtR,
2078                             (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2079                             Depth + 1);
2080         }
2081       }
2082 
2083       if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2084         setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2085         return getAddRecExpr(
2086             getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2087             getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2088       }
2089     }
2090 
2091   // If the input value is provably positive and we could not simplify
2092   // away the sext, build a zext instead.
2093   if (isKnownNonNegative(Op))
2094     return getZeroExtendExpr(Op, Ty, Depth + 1);
2095 
2096   // The cast wasn't folded; create an explicit cast node.
2097   // Recompute the insert position, as it may have been invalidated.
2098   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2099   SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2100                                                    Op, Ty);
2101   UniqueSCEVs.InsertNode(S, IP);
2102   addToLoopUseLists(S);
2103   return S;
2104 }
2105 
2106 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2107 /// unspecified bits out to the given type.
2108 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2109                                               Type *Ty) {
2110   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2111          "This is not an extending conversion!");
2112   assert(isSCEVable(Ty) &&
2113          "This is not a conversion to a SCEVable type!");
2114   Ty = getEffectiveSCEVType(Ty);
2115 
2116   // Sign-extend negative constants.
2117   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2118     if (SC->getAPInt().isNegative())
2119       return getSignExtendExpr(Op, Ty);
2120 
2121   // Peel off a truncate cast.
2122   if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2123     const SCEV *NewOp = T->getOperand();
2124     if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2125       return getAnyExtendExpr(NewOp, Ty);
2126     return getTruncateOrNoop(NewOp, Ty);
2127   }
2128 
2129   // Next try a zext cast. If the cast is folded, use it.
2130   const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2131   if (!isa<SCEVZeroExtendExpr>(ZExt))
2132     return ZExt;
2133 
2134   // Next try a sext cast. If the cast is folded, use it.
2135   const SCEV *SExt = getSignExtendExpr(Op, Ty);
2136   if (!isa<SCEVSignExtendExpr>(SExt))
2137     return SExt;
2138 
2139   // Force the cast to be folded into the operands of an addrec.
2140   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2141     SmallVector<const SCEV *, 4> Ops;
2142     for (const SCEV *Op : AR->operands())
2143       Ops.push_back(getAnyExtendExpr(Op, Ty));
2144     return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2145   }
2146 
2147   // If the expression is obviously signed, use the sext cast value.
2148   if (isa<SCEVSMaxExpr>(Op))
2149     return SExt;
2150 
2151   // Absent any other information, use the zext cast value.
2152   return ZExt;
2153 }
2154 
2155 /// Process the given Ops list, which is a list of operands to be added under
2156 /// the given scale, and update the given map. This is a helper function for
2157 /// getAddExpr. As an example of what it does, given a sequence of operands
2158 /// that would form an add expression like this:
2159 ///
2160 ///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2161 ///
2162 /// where A and B are constants, update the map with these values:
2163 ///
2164 ///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2165 ///
2166 /// and add 13 + A*B*29 to AccumulatedConstant.
2167 /// This will allow getAddExpr to produce this:
2168 ///
2169 ///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2170 ///
2171 /// This form often exposes folding opportunities that are hidden in
2172 /// the original operand list.
2173 ///
2174 /// Return true iff it appears that any interesting folding opportunities
2175 /// may be exposed. This helps getAddExpr short-circuit extra work in
2176 /// the common case where no interesting opportunities are present, and
2177 /// is also used as a check to avoid infinite recursion.
2178 static bool
2179 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2180                              SmallVectorImpl<const SCEV *> &NewOps,
2181                              APInt &AccumulatedConstant,
2182                              const SCEV *const *Ops, size_t NumOperands,
2183                              const APInt &Scale,
2184                              ScalarEvolution &SE) {
2185   bool Interesting = false;
2186 
2187   // Iterate over the add operands. They are sorted, with constants first.
2188   unsigned i = 0;
2189   while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2190     ++i;
2191     // Pull a buried constant out to the outside.
2192     if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2193       Interesting = true;
2194     AccumulatedConstant += Scale * C->getAPInt();
2195   }
2196 
2197   // Next comes everything else. We're especially interested in multiplies
2198   // here, but they're in the middle, so just visit the rest with one loop.
2199   for (; i != NumOperands; ++i) {
2200     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2201     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2202       APInt NewScale =
2203           Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2204       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2205         // A multiplication of a constant with another add; recurse.
2206         const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2207         Interesting |=
2208             CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2209                                          Add->op_begin(), Add->getNumOperands(),
2210                                          NewScale, SE);
2211       } else {
2212         // A multiplication of a constant with some other value.
Update 2213 // the map. 2214 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2215 const SCEV *Key = SE.getMulExpr(MulOps); 2216 auto Pair = M.insert({Key, NewScale}); 2217 if (Pair.second) { 2218 NewOps.push_back(Pair.first->first); 2219 } else { 2220 Pair.first->second += NewScale; 2221 // The map already had an entry for this value, which may indicate 2222 // a folding opportunity. 2223 Interesting = true; 2224 } 2225 } 2226 } else { 2227 // An ordinary operand. Update the map. 2228 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2229 M.insert({Ops[i], Scale}); 2230 if (Pair.second) { 2231 NewOps.push_back(Pair.first->first); 2232 } else { 2233 Pair.first->second += Scale; 2234 // The map already had an entry for this value, which may indicate 2235 // a folding opportunity. 2236 Interesting = true; 2237 } 2238 } 2239 } 2240 2241 return Interesting; 2242 } 2243 2244 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2245 const SCEV *LHS, const SCEV *RHS) { 2246 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2247 SCEV::NoWrapFlags, unsigned); 2248 switch (BinOp) { 2249 default: 2250 llvm_unreachable("Unsupported binary op"); 2251 case Instruction::Add: 2252 Operation = &ScalarEvolution::getAddExpr; 2253 break; 2254 case Instruction::Sub: 2255 Operation = &ScalarEvolution::getMinusSCEV; 2256 break; 2257 case Instruction::Mul: 2258 Operation = &ScalarEvolution::getMulExpr; 2259 break; 2260 } 2261 2262 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2263 Signed ? &ScalarEvolution::getSignExtendExpr 2264 : &ScalarEvolution::getZeroExtendExpr; 2265 2266 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2267 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2268 auto *WideTy = 2269 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2270 2271 const SCEV *A = (this->*Extension)( 2272 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2273 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0), 2274 (this->*Extension)(RHS, WideTy, 0), 2275 SCEV::FlagAnyWrap, 0); 2276 return A == B; 2277 } 2278 2279 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/> 2280 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2281 const OverflowingBinaryOperator *OBO) { 2282 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2283 2284 if (OBO->hasNoUnsignedWrap()) 2285 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2286 if (OBO->hasNoSignedWrap()) 2287 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2288 2289 bool Deduced = false; 2290 2291 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2292 return {Flags, Deduced}; 2293 2294 if (OBO->getOpcode() != Instruction::Add && 2295 OBO->getOpcode() != Instruction::Sub && 2296 OBO->getOpcode() != Instruction::Mul) 2297 return {Flags, Deduced}; 2298 2299 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2300 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2301 2302 if (!OBO->hasNoUnsignedWrap() && 2303 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2304 /* Signed */ false, LHS, RHS)) { 2305 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2306 Deduced = true; 2307 } 2308 2309 if (!OBO->hasNoSignedWrap() && 2310 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2311 /* Signed */ true, LHS, RHS)) { 2312 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2313 Deduced = true; 2314 } 2315 2316 return {Flags, Deduced}; 2317 } 2318 2319 // We're trying to construct 
a SCEV of type `Type' with `Ops' as operands and
2320 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2321 // can't-overflow flags for the operation if possible.
2322 static SCEV::NoWrapFlags
2323 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2324                       const ArrayRef<const SCEV *> Ops,
2325                       SCEV::NoWrapFlags Flags) {
2326   using namespace std::placeholders;
2327 
2328   using OBO = OverflowingBinaryOperator;
2329 
2330   bool CanAnalyze =
2331       Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2332   (void)CanAnalyze;
2333   assert(CanAnalyze && "don't call from other places!");
2334 
2335   int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2336   SCEV::NoWrapFlags SignOrUnsignWrap =
2337       ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2338 
2339   // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2340   auto IsKnownNonNegative = [&](const SCEV *S) {
2341     return SE->isKnownNonNegative(S);
2342   };
2343 
2344   if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2345     Flags =
2346         ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2347 
2348   SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2349 
2350   if (SignOrUnsignWrap != SignOrUnsignMask &&
2351       (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2352       isa<SCEVConstant>(Ops[0])) {
2353 
2354     auto Opcode = [&] {
2355       switch (Type) {
2356       case scAddExpr:
2357         return Instruction::Add;
2358       case scMulExpr:
2359         return Instruction::Mul;
2360       default:
2361         llvm_unreachable("Unexpected SCEV op.");
2362       }
2363     }();
2364 
2365     const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2366 
2367     // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2368     if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2369       auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2370           Opcode, C, OBO::NoSignedWrap);
2371       if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2372         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2373     }
2374 
2375     // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2376     if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2377       auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2378           Opcode, C, OBO::NoUnsignedWrap);
2379       if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2380         Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2381     }
2382   }
2383 
2384   return Flags;
2385 }
2386 
2387 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2388   return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2389 }
2390 
2391 /// Get a canonical add expression, or something simpler if possible.
2392 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2393                                         SCEV::NoWrapFlags OrigFlags,
2394                                         unsigned Depth) {
2395   assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2396          "only nuw or nsw allowed");
2397   assert(!Ops.empty() && "Cannot get empty add!");
2398   if (Ops.size() == 1) return Ops[0];
2399 #ifndef NDEBUG
2400   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2401   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2402     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2403            "SCEVAddExpr operand types don't match!");
2404 #endif
2405 
2406   // Sort by complexity; this groups all similar expression types together.
2407   GroupByComplexity(Ops, &LI, DT);
2408 
2409   // If there are any constants, fold them together.
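  // E.g. (illustrative): after sorting, (%x + 3 + 5) arrives as (3 + 5 + %x)
  // and the loop below folds it to (8 + %x); a leftover constant zero is
  // then stripped off entirely.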
2410   unsigned Idx = 0;
2411   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2412     ++Idx;
2413     assert(Idx < Ops.size());
2414     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2415       // We found two constants, fold them together!
2416       Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2417       if (Ops.size() == 2) return Ops[0];
2418       Ops.erase(Ops.begin()+1);  // Erase the folded element
2419       LHSC = cast<SCEVConstant>(Ops[0]);
2420     }
2421 
2422     // If we are left with a constant zero being added, strip it off.
2423     if (LHSC->getValue()->isZero()) {
2424       Ops.erase(Ops.begin());
2425       --Idx;
2426     }
2427 
2428     if (Ops.size() == 1) return Ops[0];
2429   }
2430 
2431   // Delay expensive flag strengthening until necessary.
2432   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2433     return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2434   };
2435 
2436   // Limit recursion depth.
2437   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2438     return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2439 
2440   if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2441     // Don't strengthen flags if we have no new information.
2442     SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2443     if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2444       Add->setNoWrapFlags(ComputeFlags(Ops));
2445     return S;
2446   }
2447 
2448   // Okay, check to see if the same value occurs in the operand list more than
2449   // once. If so, merge them together into a multiply expression. Since we
2450   // sorted the list, these values are required to be adjacent.
2451   Type *Ty = Ops[0]->getType();
2452   bool FoundMatch = false;
2453   for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2454     if (Ops[i] == Ops[i+1]) {      // X + Y + Y  -->  X + Y*2
2455       // Scan ahead to count how many equal operands there are.
2456       unsigned Count = 2;
2457       while (i+Count != e && Ops[i+Count] == Ops[i])
2458         ++Count;
2459       // Merge the values into a multiply.
2460       const SCEV *Scale = getConstant(Ty, Count);
2461       const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2462       if (Ops.size() == Count)
2463         return Mul;
2464       Ops[i] = Mul;
2465       Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2466       --i; e -= Count - 1;
2467       FoundMatch = true;
2468     }
2469   if (FoundMatch)
2470     return getAddExpr(Ops, OrigFlags, Depth + 1);
2471 
2472   // Check for truncates. If all the operands are truncated from the same
2473   // type, see if factoring out the truncate would permit the result to be
2474   // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2475   // if the contents of the resulting outer trunc fold to something simple.
2476   auto FindTruncSrcType = [&]() -> Type * {
2477     // We're ultimately looking to fold an addrec of truncs and muls of only
2478     // constants and truncs, so if we find any other types of SCEV
2479     // as operands of the addrec then we bail and return nullptr here.
2480     // Otherwise, we return the type of the operand of a trunc that we find.
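    // E.g. (illustrative): given operands (trunc i64 %a to i32), 3, and
    // 2 * (trunc i64 %b to i32), the first trunc found yields i64 as the
    // candidate source type; the caller then checks that every operand can
    // be represented in i64 before re-evaluating the sum there.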
2481     if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2482       return T->getOperand()->getType();
2483     if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2484       const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2485       if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2486         return T->getOperand()->getType();
2487     }
2488     return nullptr;
2489   };
2490   if (auto *SrcType = FindTruncSrcType()) {
2491     SmallVector<const SCEV *, 8> LargeOps;
2492     bool Ok = true;
2493     // Check all the operands to see if they can be represented in the
2494     // source type of the truncate.
2495     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2496       if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2497         if (T->getOperand()->getType() != SrcType) {
2498           Ok = false;
2499           break;
2500         }
2501         LargeOps.push_back(T->getOperand());
2502       } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2503         LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2504       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2505         SmallVector<const SCEV *, 8> LargeMulOps;
2506         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2507           if (const SCEVTruncateExpr *T =
2508                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2509             if (T->getOperand()->getType() != SrcType) {
2510               Ok = false;
2511               break;
2512             }
2513             LargeMulOps.push_back(T->getOperand());
2514           } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2515             LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2516           } else {
2517             Ok = false;
2518             break;
2519           }
2520         }
2521         if (Ok)
2522           LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2523       } else {
2524         Ok = false;
2525         break;
2526       }
2527     }
2528     if (Ok) {
2529       // Evaluate the expression in the larger type.
2530       const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2531       // If it folds to something simple, use it. Otherwise, don't.
2532       if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2533         return getTruncateExpr(Fold, Ty);
2534     }
2535   }
2536 
2537   // Skip past any other cast SCEVs.
2538   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2539     ++Idx;
2540 
2541   // If there are add operands, they would be next.
2542   if (Idx < Ops.size()) {
2543     bool DeletedAdd = false;
2544     // If the original flags and all inlined SCEVAddExprs are NUW, use the
2545     // common NUW flag for the expression after inlining. Other flags cannot be
2546     // preserved, because they may depend on the original order of operations.
2547     SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2548     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2549       if (Ops.size() > AddOpsInlineThreshold ||
2550           Add->getNumOperands() > AddOpsInlineThreshold)
2551         break;
2552       // If we have an add, expand the add operands onto the end of the operands
2553       // list.
2554       Ops.erase(Ops.begin()+Idx);
2555       Ops.append(Add->op_begin(), Add->op_end());
2556       DeletedAdd = true;
2557       CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2558     }
2559 
2560     // If we deleted at least one add, we added operands to the end of the list,
2561     // and they are not necessarily sorted. Recurse to resort and resimplify
2562     // any operands we just acquired.
2563     if (DeletedAdd)
2564       return getAddExpr(Ops, CommonFlags, Depth + 1);
2565   }
2566 
2567   // Skip over the add expression until we get to a multiply.
2568   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2569     ++Idx;
2570 
2571   // Check to see if there are any folding opportunities present with
2572   // operands multiplied by constant values.
2573   if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2574     uint64_t BitWidth = getTypeSizeInBits(Ty);
2575     DenseMap<const SCEV *, APInt> M;
2576     SmallVector<const SCEV *, 8> NewOps;
2577     APInt AccumulatedConstant(BitWidth, 0);
2578     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2579                                      Ops.data(), Ops.size(),
2580                                      APInt(BitWidth, 1), *this)) {
2581       struct APIntCompare {
2582         bool operator()(const APInt &LHS, const APInt &RHS) const {
2583           return LHS.ult(RHS);
2584         }
2585       };
2586 
2587       // Some interesting folding opportunity is present, so it's worthwhile to
2588       // re-generate the operands list. Group the operands by constant scale,
2589       // to avoid multiplying by the same constant scale multiple times.
2590       std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2591       for (const SCEV *NewOp : NewOps)
2592         MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2593       // Re-generate the operands list.
2594       Ops.clear();
2595       if (AccumulatedConstant != 0)
2596         Ops.push_back(getConstant(AccumulatedConstant));
2597       for (auto &MulOp : MulOpLists)
2598         if (MulOp.first != 0)
2599           Ops.push_back(getMulExpr(
2600               getConstant(MulOp.first),
2601               getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2602               SCEV::FlagAnyWrap, Depth + 1));
2603       if (Ops.empty())
2604         return getZero(Ty);
2605       if (Ops.size() == 1)
2606         return Ops[0];
2607       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2608     }
2609   }
2610 
2611   // If we are adding something to a multiply expression, make sure the
2612   // something is not already an operand of the multiply. If so, merge it into
2613   // the multiply.
2614   for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2615     const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2616     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2617       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2618       if (isa<SCEVConstant>(MulOpSCEV))
2619         continue;
2620       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2621         if (MulOpSCEV == Ops[AddOp]) {
2622           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
2623           const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2624           if (Mul->getNumOperands() != 2) {
2625             // If the multiply has more than two operands, we must get the
2626             // Y*Z term.
2627             SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2628                                                 Mul->op_begin()+MulOp);
2629             MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2630             InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2631           }
2632           SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2633           const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2634           const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2635                                             SCEV::FlagAnyWrap, Depth + 1);
2636           if (Ops.size() == 2) return OuterMul;
2637           if (AddOp < Idx) {
2638             Ops.erase(Ops.begin()+AddOp);
2639             Ops.erase(Ops.begin()+Idx-1);
2640           } else {
2641             Ops.erase(Ops.begin()+Idx);
2642             Ops.erase(Ops.begin()+AddOp-1);
2643           }
2644           Ops.push_back(OuterMul);
2645           return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2646         }
2647 
2648       // Check this multiply against other multiplies being added together.
2649 for (unsigned OtherMulIdx = Idx+1; 2650 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2651 ++OtherMulIdx) { 2652 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2653 // If MulOp occurs in OtherMul, we can fold the two multiplies 2654 // together. 2655 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2656 OMulOp != e; ++OMulOp) 2657 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2658 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2659 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2660 if (Mul->getNumOperands() != 2) { 2661 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2662 Mul->op_begin()+MulOp); 2663 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2664 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2665 } 2666 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2667 if (OtherMul->getNumOperands() != 2) { 2668 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2669 OtherMul->op_begin()+OMulOp); 2670 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2671 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2672 } 2673 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2674 const SCEV *InnerMulSum = 2675 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2676 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2677 SCEV::FlagAnyWrap, Depth + 1); 2678 if (Ops.size() == 2) return OuterMul; 2679 Ops.erase(Ops.begin()+Idx); 2680 Ops.erase(Ops.begin()+OtherMulIdx-1); 2681 Ops.push_back(OuterMul); 2682 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2683 } 2684 } 2685 } 2686 } 2687 2688 // If there are any add recurrences in the operands list, see if any other 2689 // added values are loop invariant. If so, we can fold them into the 2690 // recurrence. 2691 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2692 ++Idx; 2693 2694 // Scan over all recurrences, trying to fold loop invariants into them. 2695 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2696 // Scan all of the other operands to this add and add them to the vector if 2697 // they are loop invariant w.r.t. the recurrence. 2698 SmallVector<const SCEV *, 8> LIOps; 2699 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2700 const Loop *AddRecLoop = AddRec->getLoop(); 2701 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2702 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2703 LIOps.push_back(Ops[i]); 2704 Ops.erase(Ops.begin()+i); 2705 --i; --e; 2706 } 2707 2708 // If we found some loop invariants, fold them into the recurrence. 2709 if (!LIOps.empty()) { 2710 // Compute nowrap flags for the addition of the loop-invariant ops and 2711 // the addrec. Temporarily push it as an operand for that purpose. 2712 LIOps.push_back(AddRec); 2713 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2714 LIOps.pop_back(); 2715 2716 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2717 LIOps.push_back(AddRec->getStart()); 2718 2719 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2720 // This follows from the fact that the no-wrap flags on the outer add 2721 // expression are applicable on the 0th iteration, when the add recurrence 2722 // will be equal to its start value. 2723 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2724 2725 // Build the new addrec. Propagate the NUW and NSW flags if both the 2726 // outer add and the inner addrec are guaranteed to have no overflow. 2727 // Always propagate NW. 
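      // (Self-wrap depends only on the step and the trip count, not on the
      // start value, so shifting the start by a loop-invariant amount cannot
      // introduce self-wrap; NUW/NSW, by contrast, must hold for both the
      // outer add and the inner addrec to be kept.)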
2728       Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2729       const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2730 
2731       // If all of the other operands were loop invariant, we are done.
2732       if (Ops.size() == 1) return NewRec;
2733 
2734       // Otherwise, add the folded AddRec to the non-invariant parts.
2735       for (unsigned i = 0;; ++i)
2736         if (Ops[i] == AddRec) {
2737           Ops[i] = NewRec;
2738           break;
2739         }
2740       return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2741     }
2742 
2743     // Okay, if there weren't any loop invariants to be folded, check to see if
2744     // there are multiple AddRec's with the same loop induction variable being
2745     // added together. If so, we can fold them.
2746     for (unsigned OtherIdx = Idx+1;
2747          OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2748          ++OtherIdx) {
2749       // We expect the AddRecExpr's to be sorted in reverse dominance order,
2750       // so that the 1st found AddRecExpr is dominated by all others.
2751       assert(DT.dominates(
2752                  cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2753                  AddRec->getLoop()->getHeader()) &&
2754              "AddRecExprs are not sorted in reverse dominance order?");
2755       if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2756         // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
2757         SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2758         for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2759              ++OtherIdx) {
2760           const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2761           if (OtherAddRec->getLoop() == AddRecLoop) {
2762             for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2763                  i != e; ++i) {
2764               if (i >= AddRecOps.size()) {
2765                 AddRecOps.append(OtherAddRec->op_begin()+i,
2766                                  OtherAddRec->op_end());
2767                 break;
2768               }
2769               SmallVector<const SCEV *, 2> TwoOps = {
2770                   AddRecOps[i], OtherAddRec->getOperand(i)};
2771               AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2772             }
2773             Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2774           }
2775         }
2776         // Step size has changed, so we cannot guarantee no self-wraparound.
2777         Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2778         return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2779       }
2780     }
2781 
2782     // Otherwise couldn't fold anything into this recurrence. Move on to the
2783     // next one.
2784   }
2785 
2786   // Okay, it looks like we really DO need an add expr. Check to see if we
2787   // already have one, otherwise create a new one.
2788   return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2789 }
2790 
2791 const SCEV *
2792 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2793                                     SCEV::NoWrapFlags Flags) {
2794   FoldingSetNodeID ID;
2795   ID.AddInteger(scAddExpr);
2796   for (const SCEV *Op : Ops)
2797     ID.AddPointer(Op);
2798   void *IP = nullptr;
2799   SCEVAddExpr *S =
2800       static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2801   if (!S) {
2802     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2803     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2804     S = new (SCEVAllocator)
2805         SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2806     UniqueSCEVs.InsertNode(S, IP);
2807     addToLoopUseLists(S);
2808   }
2809   S->setNoWrapFlags(Flags);
2810   return S;
2811 }
2812 
2813 const SCEV *
2814 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2815                                        const Loop *L, SCEV::NoWrapFlags Flags) {
2816   FoldingSetNodeID ID;
2817   ID.AddInteger(scAddRecExpr);
2818   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2819     ID.AddPointer(Ops[i]);
2820   ID.AddPointer(L);
2821   void *IP = nullptr;
2822   SCEVAddRecExpr *S =
2823       static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2824   if (!S) {
2825     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2826     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2827     S = new (SCEVAllocator)
2828         SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2829     UniqueSCEVs.InsertNode(S, IP);
2830     addToLoopUseLists(S);
2831   }
2832   setNoWrapFlags(S, Flags);
2833   return S;
2834 }
2835 
2836 const SCEV *
2837 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2838                                     SCEV::NoWrapFlags Flags) {
2839   FoldingSetNodeID ID;
2840   ID.AddInteger(scMulExpr);
2841   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2842     ID.AddPointer(Ops[i]);
2843   void *IP = nullptr;
2844   SCEVMulExpr *S =
2845       static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2846   if (!S) {
2847     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2848     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2849     S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2850                                         O, Ops.size());
2851     UniqueSCEVs.InsertNode(S, IP);
2852     addToLoopUseLists(S);
2853   }
2854   S->setNoWrapFlags(Flags);
2855   return S;
2856 }
2857 
2858 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2859   uint64_t k = i*j;
2860   if (j > 1 && k / j != i) Overflow = true;
2861   return k;
2862 }
2863 
2864 /// Compute the result of "n choose k", the binomial coefficient. If an
2865 /// intermediate computation overflows, Overflow will be set and the return will
2866 /// be garbage. Overflow is not cleared on absence of overflow.
2867 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2868   // We use the multiplicative formula:
2869   //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2870   // At each iteration, we take the n-th term of the numerator and divide by the
2871   // (k-n)th term of the denominator. This division will always produce an
2872   // integral result, and helps reduce the chance of overflow in the
2873   // intermediate computations. However, we can still overflow even when the
2874   // final result would fit.
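  // Worked example (illustrative): Choose(6, 2) iterates twice:
  //   i = 1: r = 1 * 6 = 6,  r /= 1  ->  6
  //   i = 2: r = 6 * 5 = 30, r /= 2  ->  15
  // Each partial product n * (n-1) * ... * (n-i+1) is divisible by i!, which
  // is why every intermediate division is exact.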
2875 
2876   if (n == 0 || n == k) return 1;
2877   if (k > n) return 0;
2878 
2879   if (k > n/2)
2880     k = n-k;
2881 
2882   uint64_t r = 1;
2883   for (uint64_t i = 1; i <= k; ++i) {
2884     r = umul_ov(r, n-(i-1), Overflow);
2885     r /= i;
2886   }
2887   return r;
2888 }
2889 
2890 /// Determine if any of the operands in this SCEV are a constant or if
2891 /// any of the add or multiply expressions in this SCEV contain a constant.
2892 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2893   struct FindConstantInAddMulChain {
2894     bool FoundConstant = false;
2895 
2896     bool follow(const SCEV *S) {
2897       FoundConstant |= isa<SCEVConstant>(S);
2898       return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2899     }
2900 
2901     bool isDone() const {
2902       return FoundConstant;
2903     }
2904   };
2905 
2906   FindConstantInAddMulChain F;
2907   SCEVTraversal<FindConstantInAddMulChain> ST(F);
2908   ST.visitAll(StartExpr);
2909   return F.FoundConstant;
2910 }
2911 
2912 /// Get a canonical multiply expression, or something simpler if possible.
2913 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2914                                         SCEV::NoWrapFlags OrigFlags,
2915                                         unsigned Depth) {
2916   assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2917          "only nuw or nsw allowed");
2918   assert(!Ops.empty() && "Cannot get empty mul!");
2919   if (Ops.size() == 1) return Ops[0];
2920 #ifndef NDEBUG
2921   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2922   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2923     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2924            "SCEVMulExpr operand types don't match!");
2925 #endif
2926 
2927   // Sort by complexity; this groups all similar expression types together.
2928   GroupByComplexity(Ops, &LI, DT);
2929 
2930   // If there are any constants, fold them together.
2931   unsigned Idx = 0;
2932   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2933     ++Idx;
2934     assert(Idx < Ops.size());
2935     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2936       // We found two constants, fold them together!
2937       Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
2938       if (Ops.size() == 2) return Ops[0];
2939       Ops.erase(Ops.begin()+1);  // Erase the folded element
2940       LHSC = cast<SCEVConstant>(Ops[0]);
2941     }
2942 
2943     // If we have a multiply of zero, it will always be zero.
2944     if (LHSC->getValue()->isZero())
2945       return LHSC;
2946 
2947     // If we are left with a constant one being multiplied, strip it off.
2948     if (LHSC->getValue()->isOne()) {
2949       Ops.erase(Ops.begin());
2950       --Idx;
2951     }
2952 
2953     if (Ops.size() == 1)
2954       return Ops[0];
2955   }
2956 
2957   // Delay expensive flag strengthening until necessary.
2958   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2959     return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
2960   };
2961 
2962   // Limit recursion depth.
2963   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2964     return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
2965 
2966   if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
2967     // Don't strengthen flags if we have no new information.
2968 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); 2969 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) 2970 Mul->setNoWrapFlags(ComputeFlags(Ops)); 2971 return S; 2972 } 2973 2974 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2975 if (Ops.size() == 2) { 2976 // C1*(C2+V) -> C1*C2 + C1*V 2977 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2978 // If any of Add's ops are Adds or Muls with a constant, apply this 2979 // transformation as well. 2980 // 2981 // TODO: There are some cases where this transformation is not 2982 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2983 // this transformation should be narrowed down. 2984 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2985 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2986 SCEV::FlagAnyWrap, Depth + 1), 2987 getMulExpr(LHSC, Add->getOperand(1), 2988 SCEV::FlagAnyWrap, Depth + 1), 2989 SCEV::FlagAnyWrap, Depth + 1); 2990 2991 if (Ops[0]->isAllOnesValue()) { 2992 // If we have a mul by -1 of an add, try distributing the -1 among the 2993 // add operands. 2994 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2995 SmallVector<const SCEV *, 4> NewOps; 2996 bool AnyFolded = false; 2997 for (const SCEV *AddOp : Add->operands()) { 2998 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2999 Depth + 1); 3000 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 3001 NewOps.push_back(Mul); 3002 } 3003 if (AnyFolded) 3004 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 3005 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 3006 // Negation preserves a recurrence's no self-wrap property. 3007 SmallVector<const SCEV *, 4> Operands; 3008 for (const SCEV *AddRecOp : AddRec->operands()) 3009 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 3010 Depth + 1)); 3011 3012 return getAddRecExpr(Operands, AddRec->getLoop(), 3013 AddRec->getNoWrapFlags(SCEV::FlagNW)); 3014 } 3015 } 3016 } 3017 } 3018 3019 // Skip over the add expression until we get to a multiply. 3020 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 3021 ++Idx; 3022 3023 // If there are mul operands inline them all into this expression. 3024 if (Idx < Ops.size()) { 3025 bool DeletedMul = false; 3026 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 3027 if (Ops.size() > MulOpsInlineThreshold) 3028 break; 3029 // If we have an mul, expand the mul operands onto the end of the 3030 // operands list. 3031 Ops.erase(Ops.begin()+Idx); 3032 Ops.append(Mul->op_begin(), Mul->op_end()); 3033 DeletedMul = true; 3034 } 3035 3036 // If we deleted at least one mul, we added operands to the end of the 3037 // list, and they are not necessarily sorted. Recurse to resort and 3038 // resimplify any operands we just acquired. 3039 if (DeletedMul) 3040 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3041 } 3042 3043 // If there are any add recurrences in the operands list, see if any other 3044 // added values are loop invariant. If so, we can fold them into the 3045 // recurrence. 3046 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 3047 ++Idx; 3048 3049 // Scan over all recurrences, trying to fold loop invariants into them. 3050 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 3051 // Scan all of the other operands to this mul and add them to the vector 3052 // if they are loop invariant w.r.t. the recurrence. 
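// For example (illustrative, added commentary): in X * Y * {A,+,B}<L>, where
// X is invariant in L but Y is not, X is folded into the recurrence and the
// result becomes Y * {X*A,+,X*B}<L>.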
3053 SmallVector<const SCEV *, 8> LIOps; 3054 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 3055 const Loop *AddRecLoop = AddRec->getLoop(); 3056 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3057 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 3058 LIOps.push_back(Ops[i]); 3059 Ops.erase(Ops.begin()+i); 3060 --i; --e; 3061 } 3062 3063 // If we found some loop invariants, fold them into the recurrence. 3064 if (!LIOps.empty()) { 3065 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 3066 SmallVector<const SCEV *, 4> NewOps; 3067 NewOps.reserve(AddRec->getNumOperands()); 3068 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1); 3069 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 3070 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i), 3071 SCEV::FlagAnyWrap, Depth + 1)); 3072 3073 // Build the new addrec. Propagate the NUW and NSW flags if both the 3074 // outer mul and the inner addrec are guaranteed to have no overflow. 3075 // 3076 // No self-wrap cannot be guaranteed after changing the step size, but 3077 // will be inferred if either NUW or NSW is true. 3078 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec}); 3079 const SCEV *NewRec = getAddRecExpr( 3080 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags)); 3081 3082 // If all of the other operands were loop invariant, we are done. 3083 if (Ops.size() == 1) return NewRec; 3084 3085 // Otherwise, multiply the folded AddRec by the non-invariant parts. 3086 for (unsigned i = 0;; ++i) 3087 if (Ops[i] == AddRec) { 3088 Ops[i] = NewRec; 3089 break; 3090 } 3091 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3092 } 3093 3094 // Okay, if there weren't any loop invariants to be folded, check to see 3095 // if there are multiple AddRec's with the same loop induction variable 3096 // being multiplied together. If so, we can fold them. 3097 3098 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L> 3099 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [ 3100 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z 3101 // ]]],+,...up to x=2n}. 3102 // Note that the arguments to choose() are always integers with values 3103 // known at compile time, never SCEV objects. 3104 // 3105 // The implementation avoids pointless extra computations when the two 3106 // addrec's are of different length (mathematically, it's equivalent to 3107 // an infinite stream of zeros on the right). 3108 bool OpsModified = false; 3109 for (unsigned OtherIdx = Idx+1; 3110 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 3111 ++OtherIdx) { 3112 const SCEVAddRecExpr *OtherAddRec = 3113 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); 3114 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) 3115 continue; 3116 3117 // Limit max number of arguments to avoid creation of unreasonably big 3118 // SCEVAddRecs with very complex operands. 
3119 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 > 3120 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec})) 3121 continue; 3122 3123 bool Overflow = false; 3124 Type *Ty = AddRec->getType(); 3125 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64; 3126 SmallVector<const SCEV*, 7> AddRecOps; 3127 for (int x = 0, xe = AddRec->getNumOperands() + 3128 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) { 3129 SmallVector <const SCEV *, 7> SumOps; 3130 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) { 3131 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow); 3132 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1), 3133 ze = std::min(x+1, (int)OtherAddRec->getNumOperands()); 3134 z < ze && !Overflow; ++z) { 3135 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow); 3136 uint64_t Coeff; 3137 if (LargerThan64Bits) 3138 Coeff = umul_ov(Coeff1, Coeff2, Overflow); 3139 else 3140 Coeff = Coeff1*Coeff2; 3141 const SCEV *CoeffTerm = getConstant(Ty, Coeff); 3142 const SCEV *Term1 = AddRec->getOperand(y-z); 3143 const SCEV *Term2 = OtherAddRec->getOperand(z); 3144 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2, 3145 SCEV::FlagAnyWrap, Depth + 1)); 3146 } 3147 } 3148 if (SumOps.empty()) 3149 SumOps.push_back(getZero(Ty)); 3150 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1)); 3151 } 3152 if (!Overflow) { 3153 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop, 3154 SCEV::FlagAnyWrap); 3155 if (Ops.size() == 2) return NewAddRec; 3156 Ops[Idx] = NewAddRec; 3157 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 3158 OpsModified = true; 3159 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec); 3160 if (!AddRec) 3161 break; 3162 } 3163 } 3164 if (OpsModified) 3165 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 3166 3167 // Otherwise couldn't fold anything into this recurrence. Move onto the 3168 // next one. 3169 } 3170 3171 // Okay, it looks like we really DO need an mul expr. Check to see if we 3172 // already have one, otherwise create a new one. 3173 return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); 3174 } 3175 3176 /// Represents an unsigned remainder expression based on unsigned division. 3177 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS, 3178 const SCEV *RHS) { 3179 assert(getEffectiveSCEVType(LHS->getType()) == 3180 getEffectiveSCEVType(RHS->getType()) && 3181 "SCEVURemExpr operand types don't match!"); 3182 3183 // Short-circuit easy cases 3184 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3185 // If constant is one, the result is trivial 3186 if (RHSC->getValue()->isOne()) 3187 return getZero(LHS->getType()); // X urem 1 --> 0 3188 3189 // If constant is a power of two, fold into a zext(trunc(LHS)). 3190 if (RHSC->getAPInt().isPowerOf2()) { 3191 Type *FullTy = LHS->getType(); 3192 Type *TruncTy = 3193 IntegerType::get(getContext(), RHSC->getAPInt().logBase2()); 3194 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy); 3195 } 3196 } 3197 3198 // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y) 3199 const SCEV *UDiv = getUDivExpr(LHS, RHS); 3200 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW); 3201 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW); 3202 } 3203 3204 /// Get a canonical unsigned division expression, or something simpler if 3205 /// possible. 
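/// For instance (an illustrative example, not from the original comments), a
/// recurrence divided by a constant that evenly divides its step, such as
/// ({0,+,8}<L> /u 4), can be rewritten by the recurrence handling below as
/// {0,+,2}<L> once the zero-extension checks prove that no bits are lost.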
3206 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 3207 const SCEV *RHS) { 3208 assert(getEffectiveSCEVType(LHS->getType()) == 3209 getEffectiveSCEVType(RHS->getType()) && 3210 "SCEVUDivExpr operand types don't match!"); 3211 3212 FoldingSetNodeID ID; 3213 ID.AddInteger(scUDivExpr); 3214 ID.AddPointer(LHS); 3215 ID.AddPointer(RHS); 3216 void *IP = nullptr; 3217 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3218 return S; 3219 3220 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 3221 if (RHSC->getValue()->isOne()) 3222 return LHS; // X udiv 1 --> x 3223 // If the denominator is zero, the result of the udiv is undefined. Don't 3224 // try to analyze it, because the resolution chosen here may differ from 3225 // the resolution chosen in other parts of the compiler. 3226 if (!RHSC->getValue()->isZero()) { 3227 // Determine if the division can be folded into the operands of 3228 // its operands. 3229 // TODO: Generalize this to non-constants by using known-bits information. 3230 Type *Ty = LHS->getType(); 3231 unsigned LZ = RHSC->getAPInt().countLeadingZeros(); 3232 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 3233 // For non-power-of-two values, effectively round the value up to the 3234 // nearest power of two. 3235 if (!RHSC->getAPInt().isPowerOf2()) 3236 ++MaxShiftAmt; 3237 IntegerType *ExtTy = 3238 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 3239 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 3240 if (const SCEVConstant *Step = 3241 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { 3242 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 3243 const APInt &StepInt = Step->getAPInt(); 3244 const APInt &DivInt = RHSC->getAPInt(); 3245 if (!StepInt.urem(DivInt) && 3246 getZeroExtendExpr(AR, ExtTy) == 3247 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3248 getZeroExtendExpr(Step, ExtTy), 3249 AR->getLoop(), SCEV::FlagAnyWrap)) { 3250 SmallVector<const SCEV *, 4> Operands; 3251 for (const SCEV *Op : AR->operands()) 3252 Operands.push_back(getUDivExpr(Op, RHS)); 3253 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW); 3254 } 3255 /// Get a canonical UDivExpr for a recurrence. 3256 /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. 3257 // We can currently only fold X%N if X is constant. 3258 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); 3259 if (StartC && !DivInt.urem(StepInt) && 3260 getZeroExtendExpr(AR, ExtTy) == 3261 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 3262 getZeroExtendExpr(Step, ExtTy), 3263 AR->getLoop(), SCEV::FlagAnyWrap)) { 3264 const APInt &StartInt = StartC->getAPInt(); 3265 const APInt &StartRem = StartInt.urem(StepInt); 3266 if (StartRem != 0) { 3267 const SCEV *NewLHS = 3268 getAddRecExpr(getConstant(StartInt - StartRem), Step, 3269 AR->getLoop(), SCEV::FlagNW); 3270 if (LHS != NewLHS) { 3271 LHS = NewLHS; 3272 3273 // Reset the ID to include the new LHS, and check if it is 3274 // already cached. 3275 ID.clear(); 3276 ID.AddInteger(scUDivExpr); 3277 ID.AddPointer(LHS); 3278 ID.AddPointer(RHS); 3279 IP = nullptr; 3280 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) 3281 return S; 3282 } 3283 } 3284 } 3285 } 3286 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 
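// For example (illustrative, added commentary), ((6 * X) /u 3) can become
// (2 * X) when the zero-extended comparison below proves the multiply loses
// no bits.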
3287 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3288 SmallVector<const SCEV *, 4> Operands; 3289 for (const SCEV *Op : M->operands()) 3290 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3291 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3292 // Find an operand that's safely divisible. 3293 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3294 const SCEV *Op = M->getOperand(i); 3295 const SCEV *Div = getUDivExpr(Op, RHSC); 3296 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3297 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3298 Operands[i] = Div; 3299 return getMulExpr(Operands); 3300 } 3301 } 3302 } 3303 3304 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3305 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3306 if (auto *DivisorConstant = 3307 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3308 bool Overflow = false; 3309 APInt NewRHS = 3310 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3311 if (Overflow) { 3312 return getConstant(RHSC->getType(), 0, false); 3313 } 3314 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3315 } 3316 } 3317 3318 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3319 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3320 SmallVector<const SCEV *, 4> Operands; 3321 for (const SCEV *Op : A->operands()) 3322 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3323 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3324 Operands.clear(); 3325 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3326 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3327 if (isa<SCEVUDivExpr>(Op) || 3328 getMulExpr(Op, RHS) != A->getOperand(i)) 3329 break; 3330 Operands.push_back(Op); 3331 } 3332 if (Operands.size() == A->getNumOperands()) 3333 return getAddExpr(Operands); 3334 } 3335 } 3336 3337 // Fold if both operands are constant. 3338 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3339 Constant *LHSCV = LHSC->getValue(); 3340 Constant *RHSCV = RHSC->getValue(); 3341 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3342 RHSCV))); 3343 } 3344 } 3345 } 3346 3347 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3348 // changes). Make sure we get a new one. 3349 IP = nullptr; 3350 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3351 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3352 LHS, RHS); 3353 UniqueSCEVs.InsertNode(S, IP); 3354 addToLoopUseLists(S); 3355 return S; 3356 } 3357 3358 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3359 APInt A = C1->getAPInt().abs(); 3360 APInt B = C2->getAPInt().abs(); 3361 uint32_t ABW = A.getBitWidth(); 3362 uint32_t BBW = B.getBitWidth(); 3363 3364 if (ABW > BBW) 3365 B = B.zext(ABW); 3366 else if (ABW < BBW) 3367 A = A.zext(BBW); 3368 3369 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3370 } 3371 3372 /// Get a canonical unsigned division expression, or something simpler if 3373 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3374 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3375 /// it's not exact because the udiv may be clearing bits. 3376 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3377 const SCEV *RHS) { 3378 // TODO: we could try to find factors in all sorts of things, but for now we 3379 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the 3380 // end of this file for inspiration. 3381 3382 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); 3383 if (!Mul || !Mul->hasNoUnsignedWrap()) 3384 return getUDivExpr(LHS, RHS); 3385 3386 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { 3387 // If the mulexpr multiplies by a constant, then that constant must be the 3388 // first element of the mulexpr. 3389 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3390 if (LHSCst == RHSCst) { 3391 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands())); 3392 return getMulExpr(Operands); 3393 } 3394 3395 // We can't just assume that LHSCst divides RHSCst cleanly, it could be 3396 // that there's a factor provided by one of the other terms. We need to 3397 // check. 3398 APInt Factor = gcd(LHSCst, RHSCst); 3399 if (!Factor.isIntN(1)) { 3400 LHSCst = 3401 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor))); 3402 RHSCst = 3403 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor))); 3404 SmallVector<const SCEV *, 2> Operands; 3405 Operands.push_back(LHSCst); 3406 Operands.append(Mul->op_begin() + 1, Mul->op_end()); 3407 LHS = getMulExpr(Operands); 3408 RHS = RHSCst; 3409 Mul = dyn_cast<SCEVMulExpr>(LHS); 3410 if (!Mul) 3411 return getUDivExactExpr(LHS, RHS); 3412 } 3413 } 3414 } 3415 3416 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) { 3417 if (Mul->getOperand(i) == RHS) { 3418 SmallVector<const SCEV *, 2> Operands; 3419 Operands.append(Mul->op_begin(), Mul->op_begin() + i); 3420 Operands.append(Mul->op_begin() + i + 1, Mul->op_end()); 3421 return getMulExpr(Operands); 3422 } 3423 } 3424 3425 return getUDivExpr(LHS, RHS); 3426 } 3427 3428 /// Get an add recurrence expression for the specified loop. Simplify the 3429 /// expression as much as possible. 3430 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step, 3431 const Loop *L, 3432 SCEV::NoWrapFlags Flags) { 3433 SmallVector<const SCEV *, 4> Operands; 3434 Operands.push_back(Start); 3435 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 3436 if (StepChrec->getLoop() == L) { 3437 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 3438 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); 3439 } 3440 3441 Operands.push_back(Step); 3442 return getAddRecExpr(Operands, L, Flags); 3443 } 3444 3445 /// Get an add recurrence expression for the specified loop. Simplify the 3446 /// expression as much as possible. 3447 const SCEV * 3448 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 3449 const Loop *L, SCEV::NoWrapFlags Flags) { 3450 if (Operands.size() == 1) return Operands[0]; 3451 #ifndef NDEBUG 3452 Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 3453 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 3454 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 3455 "SCEVAddRecExpr operand types don't match!"); 3456 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 3457 assert(isLoopInvariant(Operands[i], L) && 3458 "SCEVAddRecExpr operand is not loop-invariant!"); 3459 #endif 3460 3461 if (Operands.back()->isZero()) { 3462 Operands.pop_back(); 3463 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X 3464 } 3465 3466 // It's tempting to want to call getConstantMaxBackedgeTakenCount count here and 3467 // use that information to infer NUW and NSW flags. 
However, computing a
3468 // BE count requires calling getAddRecExpr, so we may not yet have a
3469 // meaningful BE count at this point (and if we don't, we'd be stuck
3470 // with a SCEVCouldNotCompute as the cached BE count).
3471
3472 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3473
3474 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3475 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3476 const Loop *NestedLoop = NestedAR->getLoop();
3477 if (L->contains(NestedLoop)
3478 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3479 : (!NestedLoop->contains(L) &&
3480 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3481 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3482 Operands[0] = NestedAR->getStart();
3483 // AddRecs require their operands to be loop-invariant with respect to
3484 // their loops. Don't perform this transformation if it would break this
3485 // requirement.
3486 bool AllInvariant = all_of(
3487 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3488
3489 if (AllInvariant) {
3490 // Create a recurrence for the outer loop with the same step size.
3491 //
3492 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3493 // inner recurrence has the same property.
3494 SCEV::NoWrapFlags OuterFlags =
3495 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3496
3497 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3498 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3499 return isLoopInvariant(Op, NestedLoop);
3500 });
3501
3502 if (AllInvariant) {
3503 // Ok, both add recurrences are valid after the transformation.
3504 //
3505 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3506 // the outer recurrence has the same property.
3507 SCEV::NoWrapFlags InnerFlags =
3508 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3509 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3510 }
3511 }
3512 // Reset Operands to its original state.
3513 Operands[0] = NestedAR;
3514 }
3515 }
3516
3517 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3518 // already have one, otherwise create a new one.
3519 return getOrCreateAddRecExpr(Operands, L, Flags);
3520 }
3521
3522 const SCEV *
3523 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3524 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3525 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3526 // getSCEV(Base)->getType() has the same address space as Base->getType()
3527 // because SCEV::getType() preserves the address space.
3528 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3529 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3530 // instruction to its SCEV, because the Instruction may be guarded by control
3531 // flow and the no-overflow bits may not be valid for the expression in any
3532 // context. This can be fixed similarly to how these flags are handled for
3533 // adds.
3534 SCEV::NoWrapFlags OffsetWrap =
3535 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3536
3537 Type *CurTy = GEP->getType();
3538 bool FirstIter = true;
3539 SmallVector<const SCEV *, 4> Offsets;
3540 for (const SCEV *IndexExpr : IndexExprs) {
3541 // Compute the (potentially symbolic) offset in bytes for this index.
3542 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3543 // For a struct, add the member offset.
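// (Illustrative example, added commentary: indexing field 1 of
// { i32, i64 } contributes a constant 8-byte offset under a typical 64-bit
// data layout, i.e. 4 bytes of i32 plus 4 bytes of padding.)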
3544 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3545 unsigned FieldNo = Index->getZExtValue(); 3546 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3547 Offsets.push_back(FieldOffset); 3548 3549 // Update CurTy to the type of the field at Index. 3550 CurTy = STy->getTypeAtIndex(Index); 3551 } else { 3552 // Update CurTy to its element type. 3553 if (FirstIter) { 3554 assert(isa<PointerType>(CurTy) && 3555 "The first index of a GEP indexes a pointer"); 3556 CurTy = GEP->getSourceElementType(); 3557 FirstIter = false; 3558 } else { 3559 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3560 } 3561 // For an array, add the element offset, explicitly scaled. 3562 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3563 // Getelementptr indices are signed. 3564 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3565 3566 // Multiply the index by the element size to compute the element offset. 3567 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3568 Offsets.push_back(LocalOffset); 3569 } 3570 } 3571 3572 // Handle degenerate case of GEP without offsets. 3573 if (Offsets.empty()) 3574 return BaseExpr; 3575 3576 // Add the offsets together, assuming nsw if inbounds. 3577 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3578 // Add the base address and the offset. We cannot use the nsw flag, as the 3579 // base address is unsigned. However, if we know that the offset is 3580 // non-negative, we can use nuw. 3581 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset) 3582 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3583 return getAddExpr(BaseExpr, Offset, BaseWrap); 3584 } 3585 3586 std::tuple<SCEV *, FoldingSetNodeID, void *> 3587 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3588 ArrayRef<const SCEV *> Ops) { 3589 FoldingSetNodeID ID; 3590 void *IP = nullptr; 3591 ID.AddInteger(SCEVType); 3592 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3593 ID.AddPointer(Ops[i]); 3594 return std::tuple<SCEV *, FoldingSetNodeID, void *>( 3595 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP); 3596 } 3597 3598 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3599 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap; 3600 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3601 } 3602 3603 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3604 SmallVectorImpl<const SCEV *> &Ops) { 3605 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3606 if (Ops.size() == 1) return Ops[0]; 3607 #ifndef NDEBUG 3608 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3609 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 3610 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3611 "Operand types don't match!"); 3612 #endif 3613 3614 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3615 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3616 3617 // Sort by complexity, this groups all similar expression types together. 3618 GroupByComplexity(Ops, &LI, DT); 3619 3620 // Check if we have created the same expression before. 3621 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) { 3622 return S; 3623 } 3624 3625 // If there are any constants, fold them together. 
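// For example (illustrative, added commentary), umin(7, 9, X) folds its two
// constants to umin(7, X), and umin(0, X) folds all the way to 0 because 0
// is the unsigned minimum.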
3626 unsigned Idx = 0; 3627 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3628 ++Idx; 3629 assert(Idx < Ops.size()); 3630 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3631 if (Kind == scSMaxExpr) 3632 return APIntOps::smax(LHS, RHS); 3633 else if (Kind == scSMinExpr) 3634 return APIntOps::smin(LHS, RHS); 3635 else if (Kind == scUMaxExpr) 3636 return APIntOps::umax(LHS, RHS); 3637 else if (Kind == scUMinExpr) 3638 return APIntOps::umin(LHS, RHS); 3639 llvm_unreachable("Unknown SCEV min/max opcode"); 3640 }; 3641 3642 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3643 // We found two constants, fold them together! 3644 ConstantInt *Fold = ConstantInt::get( 3645 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3646 Ops[0] = getConstant(Fold); 3647 Ops.erase(Ops.begin()+1); // Erase the folded element 3648 if (Ops.size() == 1) return Ops[0]; 3649 LHSC = cast<SCEVConstant>(Ops[0]); 3650 } 3651 3652 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3653 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3654 3655 if (IsMax ? IsMinV : IsMaxV) { 3656 // If we are left with a constant minimum(/maximum)-int, strip it off. 3657 Ops.erase(Ops.begin()); 3658 --Idx; 3659 } else if (IsMax ? IsMaxV : IsMinV) { 3660 // If we have a max(/min) with a constant maximum(/minimum)-int, 3661 // it will always be the extremum. 3662 return LHSC; 3663 } 3664 3665 if (Ops.size() == 1) return Ops[0]; 3666 } 3667 3668 // Find the first operation of the same kind 3669 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3670 ++Idx; 3671 3672 // Check to see if one of the operands is of the same kind. If so, expand its 3673 // operands onto our operand list, and recurse to simplify. 3674 if (Idx < Ops.size()) { 3675 bool DeletedAny = false; 3676 while (Ops[Idx]->getSCEVType() == Kind) { 3677 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3678 Ops.erase(Ops.begin()+Idx); 3679 Ops.append(SMME->op_begin(), SMME->op_end()); 3680 DeletedAny = true; 3681 } 3682 3683 if (DeletedAny) 3684 return getMinMaxExpr(Kind, Ops); 3685 } 3686 3687 // Okay, check to see if the same value occurs in the operand list twice. If 3688 // so, delete one. Since we sorted the list, these values are required to 3689 // be adjacent. 3690 llvm::CmpInst::Predicate GEPred = 3691 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3692 llvm::CmpInst::Predicate LEPred = 3693 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3694 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3695 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3696 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3697 if (Ops[i] == Ops[i + 1] || 3698 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3699 // X op Y op Y --> X op Y 3700 // X op Y --> X, if we know X, Y are ordered appropriately 3701 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3702 --i; 3703 --e; 3704 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3705 Ops[i + 1])) { 3706 // X op Y --> Y, if we know X, Y are ordered appropriately 3707 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3708 --i; 3709 --e; 3710 } 3711 } 3712 3713 if (Ops.size() == 1) return Ops[0]; 3714 3715 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3716 3717 // Okay, it looks like we really DO need an expr. Check to see if we 3718 // already have one, otherwise create a new one. 
3719 const SCEV *ExistingSCEV;
3720 FoldingSetNodeID ID;
3721 void *IP;
3722 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3723 if (ExistingSCEV)
3724 return ExistingSCEV;
3725 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3726 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3727 SCEV *S = new (SCEVAllocator)
3728 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3729
3730 UniqueSCEVs.InsertNode(S, IP);
3731 addToLoopUseLists(S);
3732 return S;
3733 }
3734
3735 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3736 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3737 return getSMaxExpr(Ops);
3738 }
3739
3740 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3741 return getMinMaxExpr(scSMaxExpr, Ops);
3742 }
3743
3744 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3745 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3746 return getUMaxExpr(Ops);
3747 }
3748
3749 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3750 return getMinMaxExpr(scUMaxExpr, Ops);
3751 }
3752
3753 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3754 const SCEV *RHS) {
3755 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3756 return getSMinExpr(Ops);
3757 }
3758
3759 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3760 return getMinMaxExpr(scSMinExpr, Ops);
3761 }
3762
3763 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3764 const SCEV *RHS) {
3765 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3766 return getUMinExpr(Ops);
3767 }
3768
3769 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3770 return getMinMaxExpr(scUMinExpr, Ops);
3771 }
3772
3773 const SCEV *
3774 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3775 ScalableVectorType *ScalableTy) {
3776 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3777 Constant *One = ConstantInt::get(IntTy, 1);
3778 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3779 // Note that the expression we created is the final expression; we don't
3780 // want to simplify it any further. Also, if we call a normal getSCEV(),
3781 // we'll end up in an endless recursion. So just create an SCEVUnknown.
3782 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3783 }
3784
3785 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3786 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3787 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3788 // We can bypass creating a target-independent constant expression and then
3789 // folding it back into a ConstantInt. This is just a compile-time
3790 // optimization.
3791 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3792 }
3793
3794 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3795 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3796 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3797 // We can bypass creating a target-independent constant expression and then
3798 // folding it back into a ConstantInt. This is just a compile-time
3799 // optimization.
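// (Added illustration, assuming a typical x86-64 data layout: for x86_fp80
// the store size below is 10 bytes, while the alloc size used by
// getSizeOfExpr above is 16.)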
3800 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy)); 3801 } 3802 3803 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, 3804 StructType *STy, 3805 unsigned FieldNo) { 3806 // We can bypass creating a target-independent constant expression and then 3807 // folding it back into a ConstantInt. This is just a compile-time 3808 // optimization. 3809 return getConstant( 3810 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo)); 3811 } 3812 3813 const SCEV *ScalarEvolution::getUnknown(Value *V) { 3814 // Don't attempt to do anything other than create a SCEVUnknown object 3815 // here. createSCEV only calls getUnknown after checking for all other 3816 // interesting possibilities, and any other code that calls getUnknown 3817 // is doing so in order to hide a value from SCEV canonicalization. 3818 3819 FoldingSetNodeID ID; 3820 ID.AddInteger(scUnknown); 3821 ID.AddPointer(V); 3822 void *IP = nullptr; 3823 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 3824 assert(cast<SCEVUnknown>(S)->getValue() == V && 3825 "Stale SCEVUnknown in uniquing map!"); 3826 return S; 3827 } 3828 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 3829 FirstUnknown); 3830 FirstUnknown = cast<SCEVUnknown>(S); 3831 UniqueSCEVs.InsertNode(S, IP); 3832 return S; 3833 } 3834 3835 //===----------------------------------------------------------------------===// 3836 // Basic SCEV Analysis and PHI Idiom Recognition Code 3837 // 3838 3839 /// Test if values of the given type are analyzable within the SCEV 3840 /// framework. This primarily includes integer types, and it can optionally 3841 /// include pointer types if the ScalarEvolution class has access to 3842 /// target-specific information. 3843 bool ScalarEvolution::isSCEVable(Type *Ty) const { 3844 // Integers and pointers are always SCEVable. 3845 return Ty->isIntOrPtrTy(); 3846 } 3847 3848 /// Return the size in bits of the specified type, for which isSCEVable must 3849 /// return true. 3850 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { 3851 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3852 if (Ty->isPointerTy()) 3853 return getDataLayout().getIndexTypeSizeInBits(Ty); 3854 return getDataLayout().getTypeSizeInBits(Ty); 3855 } 3856 3857 /// Return a type with the same bitwidth as the given type and which represents 3858 /// how SCEV will treat the given type, for which isSCEVable must return 3859 /// true. For pointer types, this is the pointer index sized integer type. 3860 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { 3861 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 3862 3863 if (Ty->isIntegerTy()) 3864 return Ty; 3865 3866 // The only other support type is pointer. 3867 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 3868 return getDataLayout().getIndexType(Ty); 3869 } 3870 3871 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const { 3872 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? 
T1 : T2; 3873 } 3874 3875 const SCEV *ScalarEvolution::getCouldNotCompute() { 3876 return CouldNotCompute.get(); 3877 } 3878 3879 bool ScalarEvolution::checkValidity(const SCEV *S) const { 3880 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) { 3881 auto *SU = dyn_cast<SCEVUnknown>(S); 3882 return SU && SU->getValue() == nullptr; 3883 }); 3884 3885 return !ContainsNulls; 3886 } 3887 3888 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) { 3889 HasRecMapType::iterator I = HasRecMap.find(S); 3890 if (I != HasRecMap.end()) 3891 return I->second; 3892 3893 bool FoundAddRec = 3894 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); }); 3895 HasRecMap.insert({S, FoundAddRec}); 3896 return FoundAddRec; 3897 } 3898 3899 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}. 3900 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an 3901 /// offset I, then return {S', I}, else return {\p S, nullptr}. 3902 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) { 3903 const auto *Add = dyn_cast<SCEVAddExpr>(S); 3904 if (!Add) 3905 return {S, nullptr}; 3906 3907 if (Add->getNumOperands() != 2) 3908 return {S, nullptr}; 3909 3910 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0)); 3911 if (!ConstOp) 3912 return {S, nullptr}; 3913 3914 return {Add->getOperand(1), ConstOp->getValue()}; 3915 } 3916 3917 /// Return the ValueOffsetPair set for \p S. \p S can be represented 3918 /// by the value and offset from any ValueOffsetPair in the set. 3919 ScalarEvolution::ValueOffsetPairSetVector * 3920 ScalarEvolution::getSCEVValues(const SCEV *S) { 3921 ExprValueMapType::iterator SI = ExprValueMap.find_as(S); 3922 if (SI == ExprValueMap.end()) 3923 return nullptr; 3924 #ifndef NDEBUG 3925 if (VerifySCEVMap) { 3926 // Check there is no dangling Value in the set returned. 3927 for (const auto &VE : SI->second) 3928 assert(ValueExprMap.count(VE.first)); 3929 } 3930 #endif 3931 return &SI->second; 3932 } 3933 3934 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V) 3935 /// cannot be used separately. eraseValueFromMap should be used to remove 3936 /// V from ValueExprMap and ExprValueMap at the same time. 3937 void ScalarEvolution::eraseValueFromMap(Value *V) { 3938 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 3939 if (I != ValueExprMap.end()) { 3940 const SCEV *S = I->second; 3941 // Remove {V, 0} from the set of ExprValueMap[S] 3942 if (auto *SV = getSCEVValues(S)) 3943 SV->remove({V, nullptr}); 3944 3945 // Remove {V, Offset} from the set of ExprValueMap[Stripped] 3946 const SCEV *Stripped; 3947 ConstantInt *Offset; 3948 std::tie(Stripped, Offset) = splitAddExpr(S); 3949 if (Offset != nullptr) { 3950 if (auto *SV = getSCEVValues(Stripped)) 3951 SV->remove({V, Offset}); 3952 } 3953 ValueExprMap.erase(V); 3954 } 3955 } 3956 3957 /// Check whether value has nuw/nsw/exact set but SCEV does not. 3958 /// TODO: In reality it is better to check the poison recursively 3959 /// but this is better than nothing. 
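// For example (illustrative, added commentary): if %a = add nuw i8 %x, 1
// folds to the plain SCEV (1 + %x) without nuw, then caching that SCEV -> %a
// would let the expander reuse %a in contexts where unsigned wrap is
// possible, wrongly yielding poison there.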
3960 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
3961 if (auto *I = dyn_cast<Instruction>(V)) {
3962 if (isa<OverflowingBinaryOperator>(I)) {
3963 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
3964 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
3965 return true;
3966 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
3967 return true;
3968 }
3969 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
3970 return true;
3971 }
3972 return false;
3973 }
3974
3975 /// Return an existing SCEV if it exists, otherwise analyze the expression and
3976 /// create a new one.
3977 const SCEV *ScalarEvolution::getSCEV(Value *V) {
3978 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
3979
3980 const SCEV *S = getExistingSCEV(V);
3981 if (S == nullptr) {
3982 S = createSCEV(V);
3983 // During PHI resolution, it is possible to create two SCEVs for the same
3984 // V, so we need to double-check whether V->S was inserted into
3985 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
3986 std::pair<ValueExprMapType::iterator, bool> Pair =
3987 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
3988 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
3989 ExprValueMap[S].insert({V, nullptr});
3990
3991 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
3992 // ExprValueMap.
3993 const SCEV *Stripped = S;
3994 ConstantInt *Offset = nullptr;
3995 std::tie(Stripped, Offset) = splitAddExpr(S);
3996 // If Stripped is SCEVUnknown, don't bother to save
3997 // Stripped -> {V, offset}. It doesn't simplify anything and sometimes
3998 // even increases the complexity of the expansion code.
3999 // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
4000 // because it may generate add/sub instead of GEP in SCEV expansion.
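// (Illustrative example, added commentary: when S = (4 + %a), recording
// {V, 4} under the SCEV of %a lets the expander materialize that
// sub-expression as V - 4 rather than emitting fresh IR.)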
4001 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) && 4002 !isa<GetElementPtrInst>(V)) 4003 ExprValueMap[Stripped].insert({V, Offset}); 4004 } 4005 } 4006 return S; 4007 } 4008 4009 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4010 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4011 4012 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4013 if (I != ValueExprMap.end()) { 4014 const SCEV *S = I->second; 4015 if (checkValidity(S)) 4016 return S; 4017 eraseValueFromMap(V); 4018 forgetMemoizedResults(S); 4019 } 4020 return nullptr; 4021 } 4022 4023 /// Return a SCEV corresponding to -V = -1*V 4024 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4025 SCEV::NoWrapFlags Flags) { 4026 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4027 return getConstant( 4028 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4029 4030 Type *Ty = V->getType(); 4031 Ty = getEffectiveSCEVType(Ty); 4032 return getMulExpr(V, getMinusOne(Ty), Flags); 4033 } 4034 4035 /// If Expr computes ~A, return A else return nullptr 4036 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4037 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4038 if (!Add || Add->getNumOperands() != 2 || 4039 !Add->getOperand(0)->isAllOnesValue()) 4040 return nullptr; 4041 4042 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4043 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4044 !AddRHS->getOperand(0)->isAllOnesValue()) 4045 return nullptr; 4046 4047 return AddRHS->getOperand(1); 4048 } 4049 4050 /// Return a SCEV corresponding to ~V = -1-V 4051 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4052 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4053 return getConstant( 4054 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4055 4056 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4057 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4058 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4059 SmallVector<const SCEV *, 2> MatchedOperands; 4060 for (const SCEV *Operand : MME->operands()) { 4061 const SCEV *Matched = MatchNotExpr(Operand); 4062 if (!Matched) 4063 return (const SCEV *)nullptr; 4064 MatchedOperands.push_back(Matched); 4065 } 4066 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4067 MatchedOperands); 4068 }; 4069 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4070 return Replaced; 4071 } 4072 4073 Type *Ty = V->getType(); 4074 Ty = getEffectiveSCEVType(Ty); 4075 return getMinusSCEV(getMinusOne(Ty), V); 4076 } 4077 4078 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, 4079 SCEV::NoWrapFlags Flags, 4080 unsigned Depth) { 4081 // Fast path: X - X --> 0. 4082 if (LHS == RHS) 4083 return getZero(LHS->getType()); 4084 4085 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation 4086 // makes it so that we cannot make much use of NUW. 4087 auto AddFlags = SCEV::FlagAnyWrap; 4088 const bool RHSIsNotMinSigned = 4089 !getSignedRangeMin(RHS).isMinSignedValue(); 4090 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { 4091 // Let M be the minimum representable signed value. Then (-1)*RHS 4092 // signed-wraps if and only if RHS is M. That can happen even for 4093 // a NSW subtraction because e.g. (-1)*M signed-wraps even though 4094 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS + 4095 // (-1)*RHS, we need to prove that RHS != M. 
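// (Concretely, in i8: M = -128, and (-1) * (-128) signed-wraps because +128
// is not representable, even though -1 - (-128) = 127 is fine; an
// illustrative example, added commentary.)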
4096 //
4097 // If LHS is non-negative and we know that LHS - RHS does not
4098 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4099 // either by proving that RHS > M or that LHS >= 0.
4100 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4101 AddFlags = SCEV::FlagNSW;
4102 }
4103 }
4104
4105 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4106 // RHS is NSW and LHS >= 0.
4107 //
4108 // The difficulty here is that the NSW flag may have been proven
4109 // relative to a loop that is to be found in a recurrence in LHS and
4110 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4111 // larger scope than intended.
4112 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4113
4114 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
4115 }
4116
4117 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4118 unsigned Depth) {
4119 Type *SrcTy = V->getType();
4120 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4121 "Cannot truncate or zero extend with non-integer arguments!");
4122 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4123 return V; // No conversion
4124 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4125 return getTruncateExpr(V, Ty, Depth);
4126 return getZeroExtendExpr(V, Ty, Depth);
4127 }
4128
4129 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4130 unsigned Depth) {
4131 Type *SrcTy = V->getType();
4132 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4133 "Cannot truncate or sign extend with non-integer arguments!");
4134 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4135 return V; // No conversion
4136 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4137 return getTruncateExpr(V, Ty, Depth);
4138 return getSignExtendExpr(V, Ty, Depth);
4139 }
4140
4141 const SCEV *
4142 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4143 Type *SrcTy = V->getType();
4144 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4145 "Cannot noop or zero extend with non-integer arguments!");
4146 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4147 "getNoopOrZeroExtend cannot truncate!");
4148 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4149 return V; // No conversion
4150 return getZeroExtendExpr(V, Ty);
4151 }
4152
4153 const SCEV *
4154 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4155 Type *SrcTy = V->getType();
4156 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4157 "Cannot noop or sign extend with non-integer arguments!");
4158 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4159 "getNoopOrSignExtend cannot truncate!");
4160 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4161 return V; // No conversion
4162 return getSignExtendExpr(V, Ty);
4163 }
4164
4165 const SCEV *
4166 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4167 Type *SrcTy = V->getType();
4168 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4169 "Cannot noop or any extend with non-integer arguments!");
4170 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4171 "getNoopOrAnyExtend cannot truncate!");
4172 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4173 return V; // No conversion
4174 return getAnyExtendExpr(V, Ty);
4175 }
4176
4177 const SCEV *
4178 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4179 Type *SrcTy = V->getType();
4180 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4181 "Cannot
truncate or noop with non-integer arguments!"); 4182 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 4183 "getTruncateOrNoop cannot extend!"); 4184 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 4185 return V; // No conversion 4186 return getTruncateExpr(V, Ty); 4187 } 4188 4189 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 4190 const SCEV *RHS) { 4191 const SCEV *PromotedLHS = LHS; 4192 const SCEV *PromotedRHS = RHS; 4193 4194 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 4195 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 4196 else 4197 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 4198 4199 return getUMaxExpr(PromotedLHS, PromotedRHS); 4200 } 4201 4202 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 4203 const SCEV *RHS) { 4204 SmallVector<const SCEV *, 2> Ops = { LHS, RHS }; 4205 return getUMinFromMismatchedTypes(Ops); 4206 } 4207 4208 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes( 4209 SmallVectorImpl<const SCEV *> &Ops) { 4210 assert(!Ops.empty() && "At least one operand must be!"); 4211 // Trivial case. 4212 if (Ops.size() == 1) 4213 return Ops[0]; 4214 4215 // Find the max type first. 4216 Type *MaxType = nullptr; 4217 for (auto *S : Ops) 4218 if (MaxType) 4219 MaxType = getWiderType(MaxType, S->getType()); 4220 else 4221 MaxType = S->getType(); 4222 assert(MaxType && "Failed to find maximum type!"); 4223 4224 // Extend all ops to max type. 4225 SmallVector<const SCEV *, 2> PromotedOps; 4226 for (auto *S : Ops) 4227 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType)); 4228 4229 // Generate umin. 4230 return getUMinExpr(PromotedOps); 4231 } 4232 4233 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) { 4234 // A pointer operand may evaluate to a nonpointer expression, such as null. 4235 if (!V->getType()->isPointerTy()) 4236 return V; 4237 4238 while (true) { 4239 if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) { 4240 V = Cast->getOperand(); 4241 } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { 4242 const SCEV *PtrOp = nullptr; 4243 for (const SCEV *NAryOp : NAry->operands()) { 4244 if (NAryOp->getType()->isPointerTy()) { 4245 // Cannot find the base of an expression with multiple pointer ops. 4246 if (PtrOp) 4247 return V; 4248 PtrOp = NAryOp; 4249 } 4250 } 4251 if (!PtrOp) // All operands were non-pointer. 4252 return V; 4253 V = PtrOp; 4254 } else // Not something we can look further into. 4255 return V; 4256 } 4257 } 4258 4259 /// Push users of the given Instruction onto the given Worklist. 4260 static void 4261 PushDefUseChildren(Instruction *I, 4262 SmallVectorImpl<Instruction *> &Worklist) { 4263 // Push the def-use children onto the Worklist stack. 4264 for (User *U : I->users()) 4265 Worklist.push_back(cast<Instruction>(U)); 4266 } 4267 4268 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { 4269 SmallVector<Instruction *, 16> Worklist; 4270 PushDefUseChildren(PN, Worklist); 4271 4272 SmallPtrSet<Instruction *, 8> Visited; 4273 Visited.insert(PN); 4274 while (!Worklist.empty()) { 4275 Instruction *I = Worklist.pop_back_val(); 4276 if (!Visited.insert(I).second) 4277 continue; 4278 4279 auto It = ValueExprMap.find_as(static_cast<Value *>(I)); 4280 if (It != ValueExprMap.end()) { 4281 const SCEV *Old = It->second; 4282 4283 // Short-circuit the def-use traversal if the symbolic name 4284 // ceases to appear in expressions. 
4285 if (Old != SymName && !hasOperand(Old, SymName))
4286 continue;
4287
4288 // SCEVUnknown for a PHI either means that it has an unrecognized
4289 // structure, it's a PHI that's in the process of being computed
4290 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4291 // additional loop trip count information isn't going to change anything.
4292 // In the second case, createNodeForPHI will perform the necessary
4293 // updates on its own when it gets to that point. In the third, we do
4294 // want to forget the SCEVUnknown.
4295 if (!isa<PHINode>(I) ||
4296 !isa<SCEVUnknown>(Old) ||
4297 (I != PN && Old == SymName)) {
4298 eraseValueFromMap(It->first);
4299 forgetMemoizedResults(Old);
4300 }
4301 }
4302
4303 PushDefUseChildren(I, Worklist);
4304 }
4305 }
4306
4307 namespace {
4308
4309 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4310 /// use its start expression. For an AddRec of any other loop, keep the AddRec
4311 /// itself when IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
4312 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
4313 /// done either.
4314 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4315 public:
4316 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4317 bool IgnoreOtherLoops = true) {
4318 SCEVInitRewriter Rewriter(L, SE);
4319 const SCEV *Result = Rewriter.visit(S);
4320 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4321 return SE.getCouldNotCompute();
4322 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4323 ? SE.getCouldNotCompute()
4324 : Result;
4325 }
4326
4327 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4328 if (!SE.isLoopInvariant(Expr, L))
4329 SeenLoopVariantSCEVUnknown = true;
4330 return Expr;
4331 }
4332
4333 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4334 // Only rewrite AddRecExprs for this loop.
4335 if (Expr->getLoop() == L)
4336 return Expr->getStart();
4337 SeenOtherLoops = true;
4338 return Expr;
4339 }
4340
4341 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4342
4343 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4344
4345 private:
4346 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4347 : SCEVRewriteVisitor(SE), L(L) {}
4348
4349 const Loop *L;
4350 bool SeenLoopVariantSCEVUnknown = false;
4351 bool SeenOtherLoops = false;
4352 };
4353
4354 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4355 /// use its post-increment expression; for an AddRec of any other loop, keep
4356 /// the AddRec itself.
4357 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4358 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4359 public:
4360 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4361 SCEVPostIncRewriter Rewriter(L, SE);
4362 const SCEV *Result = Rewriter.visit(S);
4363 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4364 ? SE.getCouldNotCompute()
4365 : Result;
4366 }
4367
4368 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4369 if (!SE.isLoopInvariant(Expr, L))
4370 SeenLoopVariantSCEVUnknown = true;
4371 return Expr;
4372 }
4373
4374 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4375 // Only rewrite AddRecExprs for this loop.
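// For example (illustrative, added commentary), the affine recurrence
// {A,+,B}<L> is rewritten to its post-increment form {A+B,+,B}<L>.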
4376 if (Expr->getLoop() == L) 4377 return Expr->getPostIncExpr(SE); 4378 SeenOtherLoops = true; 4379 return Expr; 4380 } 4381 4382 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; } 4383 4384 bool hasSeenOtherLoops() { return SeenOtherLoops; } 4385 4386 private: 4387 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE) 4388 : SCEVRewriteVisitor(SE), L(L) {} 4389 4390 const Loop *L; 4391 bool SeenLoopVariantSCEVUnknown = false; 4392 bool SeenOtherLoops = false; 4393 }; 4394 4395 /// This class evaluates the compare condition by matching it against the 4396 /// condition of loop latch. If there is a match we assume a true value 4397 /// for the condition while building SCEV nodes. 4398 class SCEVBackedgeConditionFolder 4399 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> { 4400 public: 4401 static const SCEV *rewrite(const SCEV *S, const Loop *L, 4402 ScalarEvolution &SE) { 4403 bool IsPosBECond = false; 4404 Value *BECond = nullptr; 4405 if (BasicBlock *Latch = L->getLoopLatch()) { 4406 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 4407 if (BI && BI->isConditional()) { 4408 assert(BI->getSuccessor(0) != BI->getSuccessor(1) && 4409 "Both outgoing branches should not target same header!"); 4410 BECond = BI->getCondition(); 4411 IsPosBECond = BI->getSuccessor(0) == L->getHeader(); 4412 } else { 4413 return S; 4414 } 4415 } 4416 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE); 4417 return Rewriter.visit(S); 4418 } 4419 4420 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 4421 const SCEV *Result = Expr; 4422 bool InvariantF = SE.isLoopInvariant(Expr, L); 4423 4424 if (!InvariantF) { 4425 Instruction *I = cast<Instruction>(Expr->getValue()); 4426 switch (I->getOpcode()) { 4427 case Instruction::Select: { 4428 SelectInst *SI = cast<SelectInst>(I); 4429 Optional<const SCEV *> Res = 4430 compareWithBackedgeCondition(SI->getCondition()); 4431 if (Res.hasValue()) { 4432 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne(); 4433 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue()); 4434 } 4435 break; 4436 } 4437 default: { 4438 Optional<const SCEV *> Res = compareWithBackedgeCondition(I); 4439 if (Res.hasValue()) 4440 Result = Res.getValue(); 4441 break; 4442 } 4443 } 4444 } 4445 return Result; 4446 } 4447 4448 private: 4449 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond, 4450 bool IsPosBECond, ScalarEvolution &SE) 4451 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond), 4452 IsPositiveBECond(IsPosBECond) {} 4453 4454 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC); 4455 4456 const Loop *L; 4457 /// Loop back condition. 4458 Value *BackedgeCond = nullptr; 4459 /// Set to true if loop back is on positive branch condition. 4460 bool IsPositiveBECond; 4461 }; 4462 4463 Optional<const SCEV *> 4464 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) { 4465 4466 // If value matches the backedge condition for loop latch, 4467 // then return a constant evolution node based on loopback 4468 // branch taken. 4469 if (BackedgeCond == IC) 4470 return IsPositiveBECond ? 
4471 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4472 return None;
4473 }
4474
4475 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4476 public:
4477 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4478 ScalarEvolution &SE) {
4479 SCEVShiftRewriter Rewriter(L, SE);
4480 const SCEV *Result = Rewriter.visit(S);
4481 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4482 }
4483
4484 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4485 // Only allow AddRecExprs for this loop.
4486 if (!SE.isLoopInvariant(Expr, L))
4487 Valid = false;
4488 return Expr;
4489 }
4490
4491 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4492 if (Expr->getLoop() == L && Expr->isAffine())
4493 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4494 Valid = false;
4495 return Expr;
4496 }
4497
4498 bool isValid() { return Valid; }
4499
4500 private:
4501 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4502 : SCEVRewriteVisitor(SE), L(L) {}
4503
4504 const Loop *L;
4505 bool Valid = true;
4506 };
4507
4508 } // end anonymous namespace
4509
4510 SCEV::NoWrapFlags
4511 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4512 if (!AR->isAffine())
4513 return SCEV::FlagAnyWrap;
4514
4515 using OBO = OverflowingBinaryOperator;
4516
4517 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4518
4519 if (!AR->hasNoSignedWrap()) {
4520 ConstantRange AddRecRange = getSignedRange(AR);
4521 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4522
4523 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4524 Instruction::Add, IncRange, OBO::NoSignedWrap);
4525 if (NSWRegion.contains(AddRecRange))
4526 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4527 }
4528
4529 if (!AR->hasNoUnsignedWrap()) {
4530 ConstantRange AddRecRange = getUnsignedRange(AR);
4531 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4532
4533 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4534 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4535 if (NUWRegion.contains(AddRecRange))
4536 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4537 }
4538
4539 return Result;
4540 }
4541
4542 SCEV::NoWrapFlags
4543 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4544 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4545
4546 if (AR->hasNoSignedWrap())
4547 return Result;
4548
4549 if (!AR->isAffine())
4550 return Result;
4551
4552 const SCEV *Step = AR->getStepRecurrence(*this);
4553 const Loop *L = AR->getLoop();
4554
4555 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4556 // Note that this serves two purposes: It filters out loops that are
4557 // simply not analyzable, and it covers the case where this code is
4558 // being called from within backedge-taken count analysis, such that
4559 // attempting to ask for the backedge-taken count would likely result
4560 // in infinite recursion. In the latter case, the analysis code will
4561 // cope with a conservative value, and it will take care to purge
4562 // that value once it has finished.
4563 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4564
4565 // Normally, in the cases we can prove no-overflow via a
4566 // backedge guarding condition, we can also compute a backedge
4567 // taken count for the loop. The exceptions are assumptions and
4568 // guards present in the loop -- SCEV is not great at exploiting
4569 // these to compute max backedge taken counts, but can still use
4570 // these to prove lack of overflow. Use this fact to avoid
4571 // doing extra work that may not pay off.
4572
4573 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4574 AC.assumptions().empty())
4575 return Result;
4576
4577 // If the backedge is guarded by a comparison with the pre-inc value the
4578 // addrec is safe. Also, if the entry is guarded by a comparison with the
4579 // start value and the backedge is guarded by a comparison with the post-inc
4580 // value, the addrec is safe.
4581 ICmpInst::Predicate Pred;
4582 const SCEV *OverflowLimit =
4583 getSignedOverflowLimitForStep(Step, &Pred, this);
4584 if (OverflowLimit &&
4585 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4586 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4587 Result = setFlags(Result, SCEV::FlagNSW);
4588 }
4589 return Result;
4590 }

4591 SCEV::NoWrapFlags
4592 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4593 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4594
4595 if (AR->hasNoUnsignedWrap())
4596 return Result;
4597
4598 if (!AR->isAffine())
4599 return Result;
4600
4601 const SCEV *Step = AR->getStepRecurrence(*this);
4602 unsigned BitWidth = getTypeSizeInBits(AR->getType());
4603 const Loop *L = AR->getLoop();
4604
4605 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4606 // Note that this serves two purposes: It filters out loops that are
4607 // simply not analyzable, and it covers the case where this code is
4608 // being called from within backedge-taken count analysis, such that
4609 // attempting to ask for the backedge-taken count would likely result
4610 // in infinite recursion. In the latter case, the analysis code will
4611 // cope with a conservative value, and it will take care to purge
4612 // that value once it has finished.
4613 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4614
4615 // Normally, in the cases we can prove no-overflow via a
4616 // backedge guarding condition, we can also compute a backedge
4617 // taken count for the loop. The exceptions are assumptions and
4618 // guards present in the loop -- SCEV is not great at exploiting
4619 // these to compute max backedge taken counts, but can still use
4620 // these to prove lack of overflow. Use this fact to avoid
4621 // doing extra work that may not pay off.
4622
4623 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4624 AC.assumptions().empty())
4625 return Result;
4626
4627 // If the backedge is guarded by a comparison with the pre-inc value the
4628 // addrec is safe. Also, if the entry is guarded by a comparison with the
4629 // start value and the backedge is guarded by a comparison with the post-inc
4630 // value, the addrec is safe.
4631 if (isKnownPositive(Step)) {
4632 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
4633 getUnsignedRangeMax(Step));
4634 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
4635 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
4636 Result = setFlags(Result, SCEV::FlagNUW);
4637 }
4638 }
4639
4640 return Result;
4641 }
4642
4643 namespace {
4644
4645 /// Represents an abstract binary operation. This may exist as a
4646 /// normal instruction or constant expression, or may have been
4647 /// derived from an expression tree.
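/// As an illustrative example (not from the original source): the IR
/// instruction
///   %r = add nuw i32 %a, %b
/// is described by a BinaryOp with Opcode == Instruction::Add, LHS == %a,
/// RHS == %b, IsNUW == true, and Op pointing at the instruction, while a
/// BinaryOp synthesized below (e.g. a lshr folded to a udiv) leaves Op as
/// nullptr.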
4648 struct BinaryOp {
4649 unsigned Opcode;
4650 Value *LHS;
4651 Value *RHS;
4652 bool IsNSW = false;
4653 bool IsNUW = false;
4654
4655 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4656 /// constant expression.
4657 Operator *Op = nullptr;
4658
4659 explicit BinaryOp(Operator *Op)
4660 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4661 Op(Op) {
4662 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4663 IsNSW = OBO->hasNoSignedWrap();
4664 IsNUW = OBO->hasNoUnsignedWrap();
4665 }
4666 }
4667
4668 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4669 bool IsNUW = false)
4670 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4671 };
4672
4673 } // end anonymous namespace
4674
4675 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4676 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4677 auto *Op = dyn_cast<Operator>(V);
4678 if (!Op)
4679 return None;
4680
4681 // Implementation detail: all the cleverness here should happen without
4682 // creating new SCEV expressions -- our caller knows tricks to avoid creating
4683 // SCEV expressions when possible, and we should not break that.
4684
4685 switch (Op->getOpcode()) {
4686 case Instruction::Add:
4687 case Instruction::Sub:
4688 case Instruction::Mul:
4689 case Instruction::UDiv:
4690 case Instruction::URem:
4691 case Instruction::And:
4692 case Instruction::Or:
4693 case Instruction::AShr:
4694 case Instruction::Shl:
4695 return BinaryOp(Op);
4696
4697 case Instruction::Xor:
4698 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4699 // If the RHS of the xor is a signmask, then this is just an add.
4700 // Instcombine turns add of signmask into xor as a strength reduction step.
4701 if (RHSC->getValue().isSignMask())
4702 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4703 return BinaryOp(Op);
4704
4705 case Instruction::LShr:
4706 // Turn logical shift right of a constant into an unsigned divide.
4707 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4708 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4709
4710 // If the shift count is not less than the bitwidth, the result of
4711 // the shift is undefined. Don't try to analyze it, because the
4712 // resolution chosen here may differ from the resolution chosen in
4713 // other parts of the compiler.
4714 if (SA->getValue().ult(BitWidth)) {
4715 Constant *X =
4716 ConstantInt::get(SA->getContext(),
4717 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4718 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4719 }
4720 }
4721 return BinaryOp(Op);
4722
4723 case Instruction::ExtractValue: {
4724 auto *EVI = cast<ExtractValueInst>(Op);
4725 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4726 break;
4727
4728 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4729 if (!WO)
4730 break;
4731
4732 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4733 bool Signed = WO->isSigned();
4734 // TODO: Should add nuw/nsw flags for mul as well.
4735 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4736 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4737
4738 // Now that we know that all uses of the arithmetic-result component of
4739 // CI are guarded by the overflow check, we can go ahead and pretend
4740 // that the arithmetic is non-overflowing.
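// As a sketch of the guarded shape (illustrative IR, not from this file):
//   %agg = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ovf = extractvalue { i32, i1 } %agg, 1
//   br i1 %ovf, label %trap, label %cont
// cont:
//   %sum = extractvalue { i32, i1 } %agg, 0
// Every use of %sum is dominated by the no-overflow branch, so %sum can be
// treated as 'add nsw i32 %a, %b'.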
4741 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(), 4742 /* IsNSW = */ Signed, /* IsNUW = */ !Signed); 4743 } 4744 4745 default: 4746 break; 4747 } 4748 4749 // Recognise intrinsic loop.decrement.reg, and as this has exactly the same 4750 // semantics as a Sub, return a binary sub expression. 4751 if (auto *II = dyn_cast<IntrinsicInst>(V)) 4752 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg) 4753 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1)); 4754 4755 return None; 4756 } 4757 4758 /// Helper function to createAddRecFromPHIWithCasts. We have a phi 4759 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via 4760 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the 4761 /// way. This function checks if \p Op, an operand of this SCEVAddExpr, 4762 /// follows one of the following patterns: 4763 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4764 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) 4765 /// If the SCEV expression of \p Op conforms with one of the expected patterns 4766 /// we return the type of the truncation operation, and indicate whether the 4767 /// truncated type should be treated as signed/unsigned by setting 4768 /// \p Signed to true/false, respectively. 4769 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, 4770 bool &Signed, ScalarEvolution &SE) { 4771 // The case where Op == SymbolicPHI (that is, with no type conversions on 4772 // the way) is handled by the regular add recurrence creating logic and 4773 // would have already been triggered in createAddRecForPHI. Reaching it here 4774 // means that createAddRecFromPHI had failed for this PHI before (e.g., 4775 // because one of the other operands of the SCEVAddExpr updating this PHI is 4776 // not invariant). 4777 // 4778 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in 4779 // this case predicates that allow us to prove that Op == SymbolicPHI will 4780 // be added. 4781 if (Op == SymbolicPHI) 4782 return nullptr; 4783 4784 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType()); 4785 unsigned NewBits = SE.getTypeSizeInBits(Op->getType()); 4786 if (SourceBits != NewBits) 4787 return nullptr; 4788 4789 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op); 4790 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op); 4791 if (!SExt && !ZExt) 4792 return nullptr; 4793 const SCEVTruncateExpr *Trunc = 4794 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand()) 4795 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand()); 4796 if (!Trunc) 4797 return nullptr; 4798 const SCEV *X = Trunc->getOperand(); 4799 if (X != SymbolicPHI) 4800 return nullptr; 4801 Signed = SExt != nullptr; 4802 return Trunc->getType(); 4803 } 4804 4805 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) { 4806 if (!PN->getType()->isIntegerTy()) 4807 return nullptr; 4808 const Loop *L = LI.getLoopFor(PN->getParent()); 4809 if (!L || L->getHeader() != PN->getParent()) 4810 return nullptr; 4811 return L; 4812 } 4813 4814 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the 4815 // computation that updates the phi follows the following pattern: 4816 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum 4817 // which correspond to a phi->trunc->sext/zext->add->phi update chain. 4818 // If so, try to see if it can be rewritten as an AddRecExpr under some 4819 // Predicates. 
If successful, return them as a pair. Also cache the results 4820 // of the analysis. 4821 // 4822 // Example usage scenario: 4823 // Say the Rewriter is called for the following SCEV: 4824 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4825 // where: 4826 // %X = phi i64 (%Start, %BEValue) 4827 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X), 4828 // and call this function with %SymbolicPHI = %X. 4829 // 4830 // The analysis will find that the value coming around the backedge has 4831 // the following SCEV: 4832 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step) 4833 // Upon concluding that this matches the desired pattern, the function 4834 // will return the pair {NewAddRec, SmallPredsVec} where: 4835 // NewAddRec = {%Start,+,%Step} 4836 // SmallPredsVec = {P1, P2, P3} as follows: 4837 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw> 4838 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64) 4839 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64) 4840 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec 4841 // under the predicates {P1,P2,P3}. 4842 // This predicated rewrite will be cached in PredicatedSCEVRewrites: 4843 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)} 4844 // 4845 // TODO's: 4846 // 4847 // 1) Extend the Induction descriptor to also support inductions that involve 4848 // casts: When needed (namely, when we are called in the context of the 4849 // vectorizer induction analysis), a Set of cast instructions will be 4850 // populated by this method, and provided back to isInductionPHI. This is 4851 // needed to allow the vectorizer to properly record them to be ignored by 4852 // the cost model and to avoid vectorizing them (otherwise these casts, 4853 // which are redundant under the runtime overflow checks, will be 4854 // vectorized, which can be costly). 4855 // 4856 // 2) Support additional induction/PHISCEV patterns: We also want to support 4857 // inductions where the sext-trunc / zext-trunc operations (partly) occur 4858 // after the induction update operation (the induction increment): 4859 // 4860 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix) 4861 // which correspond to a phi->add->trunc->sext/zext->phi update chain. 4862 // 4863 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix) 4864 // which correspond to a phi->trunc->add->sext/zext->phi update chain. 4865 // 4866 // 3) Outline common code with createAddRecFromPHI to avoid duplication. 4867 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 4868 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) { 4869 SmallVector<const SCEVPredicate *, 3> Predicates; 4870 4871 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can 4872 // return an AddRec expression under some predicate. 4873 4874 auto *PN = cast<PHINode>(SymbolicPHI->getValue()); 4875 const Loop *L = isIntegerLoopHeaderPHI(PN, LI); 4876 assert(L && "Expecting an integer loop header phi"); 4877 4878 // The loop may have multiple entrances or multiple exits; we can analyze 4879 // this phi as an addrec if it has a unique entry value and a unique 4880 // backedge value. 
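// For instance (illustrative), a header phi such as
//   %iv = phi i64 [ %init, %entry ], [ %init, %entry2 ], [ %next, %latch ]
// still qualifies: both entry edges carry the same value, so the start value
// and the backedge value are each unique.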
4881 Value *BEValueV = nullptr, *StartValueV = nullptr; 4882 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 4883 Value *V = PN->getIncomingValue(i); 4884 if (L->contains(PN->getIncomingBlock(i))) { 4885 if (!BEValueV) { 4886 BEValueV = V; 4887 } else if (BEValueV != V) { 4888 BEValueV = nullptr; 4889 break; 4890 } 4891 } else if (!StartValueV) { 4892 StartValueV = V; 4893 } else if (StartValueV != V) { 4894 StartValueV = nullptr; 4895 break; 4896 } 4897 } 4898 if (!BEValueV || !StartValueV) 4899 return None; 4900 4901 const SCEV *BEValue = getSCEV(BEValueV); 4902 4903 // If the value coming around the backedge is an add with the symbolic 4904 // value we just inserted, possibly with casts that we can ignore under 4905 // an appropriate runtime guard, then we found a simple induction variable! 4906 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue); 4907 if (!Add) 4908 return None; 4909 4910 // If there is a single occurrence of the symbolic value, possibly 4911 // casted, replace it with a recurrence. 4912 unsigned FoundIndex = Add->getNumOperands(); 4913 Type *TruncTy = nullptr; 4914 bool Signed; 4915 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4916 if ((TruncTy = 4917 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this))) 4918 if (FoundIndex == e) { 4919 FoundIndex = i; 4920 break; 4921 } 4922 4923 if (FoundIndex == Add->getNumOperands()) 4924 return None; 4925 4926 // Create an add with everything but the specified operand. 4927 SmallVector<const SCEV *, 8> Ops; 4928 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 4929 if (i != FoundIndex) 4930 Ops.push_back(Add->getOperand(i)); 4931 const SCEV *Accum = getAddExpr(Ops); 4932 4933 // The runtime checks will not be valid if the step amount is 4934 // varying inside the loop. 4935 if (!isLoopInvariant(Accum, L)) 4936 return None; 4937 4938 // *** Part2: Create the predicates 4939 4940 // Analysis was successful: we have a phi-with-cast pattern for which we 4941 // can return an AddRec expression under the following predicates: 4942 // 4943 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum) 4944 // fits within the truncated type (does not overflow) for i = 0 to n-1. 
4945 // P2: An Equal predicate that guarantees that 4946 // Start = (Ext ix (Trunc iy (Start) to ix) to iy) 4947 // P3: An Equal predicate that guarantees that 4948 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy) 4949 // 4950 // As we next prove, the above predicates guarantee that: 4951 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy) 4952 // 4953 // 4954 // More formally, we want to prove that: 4955 // Expr(i+1) = Start + (i+1) * Accum 4956 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4957 // 4958 // Given that: 4959 // 1) Expr(0) = Start 4960 // 2) Expr(1) = Start + Accum 4961 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2 4962 // 3) Induction hypothesis (step i): 4963 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum 4964 // 4965 // Proof: 4966 // Expr(i+1) = 4967 // = Start + (i+1)*Accum 4968 // = (Start + i*Accum) + Accum 4969 // = Expr(i) + Accum 4970 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum 4971 // :: from step i 4972 // 4973 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum 4974 // 4975 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) 4976 // + (Ext ix (Trunc iy (Accum) to ix) to iy) 4977 // + Accum :: from P3 4978 // 4979 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy) 4980 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y) 4981 // 4982 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum 4983 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum 4984 // 4985 // By induction, the same applies to all iterations 1<=i<n: 4986 // 4987 4988 // Create a truncated addrec for which we will add a no overflow check (P1). 4989 const SCEV *StartVal = getSCEV(StartValueV); 4990 const SCEV *PHISCEV = 4991 getAddRecExpr(getTruncateExpr(StartVal, TruncTy), 4992 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap); 4993 4994 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr. 4995 // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV 4996 // will be constant. 4997 // 4998 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't 4999 // add P1. 5000 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) { 5001 SCEVWrapPredicate::IncrementWrapFlags AddedFlags = 5002 Signed ? SCEVWrapPredicate::IncrementNSSW 5003 : SCEVWrapPredicate::IncrementNUSW; 5004 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags); 5005 Predicates.push_back(AddRecPred); 5006 } 5007 5008 // Create the Equal Predicates P2,P3: 5009 5010 // It is possible that the predicates P2 and/or P3 are computable at 5011 // compile time due to StartVal and/or Accum being constants. 5012 // If either one is, then we can check that now and escape if either P2 5013 // or P3 is false. 5014 5015 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy) 5016 // for each of StartVal and Accum 5017 auto getExtendedExpr = [&](const SCEV *Expr, 5018 bool CreateSignExtend) -> const SCEV * { 5019 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant"); 5020 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy); 5021 const SCEV *ExtendedExpr = 5022 CreateSignExtend ? 
getSignExtendExpr(TruncatedExpr, Expr->getType())
5023 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5024 return ExtendedExpr;
5025 };
5026
5027 // Given:
5028 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5029 // = getExtendedExpr(Expr)
5030 // Determine whether the predicate P: Expr == ExtendedExpr
5031 // is known to be false at compile time
5032 auto PredIsKnownFalse = [&](const SCEV *Expr,
5033 const SCEV *ExtendedExpr) -> bool {
5034 return Expr != ExtendedExpr &&
5035 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5036 };
5037
5038 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5039 if (PredIsKnownFalse(StartVal, StartExtended)) {
5040 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n");
5041 return None;
5042 }
5043
5044 // The Step is always Signed (because the overflow checks are either
5045 // NSSW or NUSW).
5046 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5047 if (PredIsKnownFalse(Accum, AccumExtended)) {
5048 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n");
5049 return None;
5050 }
5051
5052 auto AppendPredicate = [&](const SCEV *Expr,
5053 const SCEV *ExtendedExpr) -> void {
5054 if (Expr != ExtendedExpr &&
5055 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5056 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5057 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5058 Predicates.push_back(Pred);
5059 }
5060 };
5061
5062 AppendPredicate(StartVal, StartExtended);
5063 AppendPredicate(Accum, AccumExtended);
5064
5065 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5066 // which the casts had been folded away. The caller can rewrite SymbolicPHI
5067 // into NewAR if it will also add the runtime overflow checks specified in
5068 // Predicates.
5069 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5070
5071 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5072 std::make_pair(NewAR, Predicates);
5073 // Remember the result of the analysis for this SCEV at this location.
5074 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5075 return PredRewrite;
5076 }
5077
5078 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5079 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5080 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5081 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5082 if (!L)
5083 return None;
5084
5085 // Check to see if we already analyzed this PHI.
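// (A cached rewrite whose SCEV component is SymbolicPHI itself encodes a
// previously failed analysis; see the failure caching below.)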
5086 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5087 if (I != PredicatedSCEVRewrites.end()) {
5088 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5089 I->second;
5090 // Analysis was done before and failed to create an AddRec:
5091 if (Rewrite.first == SymbolicPHI)
5092 return None;
5093 // Analysis was done before and succeeded in creating an AddRec under
5094 // a predicate:
5095 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
5096 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
5097 return Rewrite;
5098 }
5099
5100 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5101 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5102
5103 // Record in the cache that the analysis failed.
5104 if (!Rewrite) {
5105 SmallVector<const SCEVPredicate *, 3> Predicates;
5106 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5107 return None;
5108 }
5109
5110 return Rewrite;
5111 }
5112
5113 // FIXME: This utility is currently required because the Rewriter currently
5114 // does not rewrite this expression:
5115 // {0, +, (sext ix (trunc iy to ix) to iy)}
5116 // into {0, +, %step},
5117 // even when the following Equal predicate exists:
5118 // "%step == (sext ix (trunc iy to ix) to iy)".
5119 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5120 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5121 if (AR1 == AR2)
5122 return true;
5123
5124 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5125 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5126 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
5127 return false;
5128 return true;
5129 };
5130
5131 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5132 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5133 return false;
5134 return true;
5135 }
5136
5137 /// A helper function for createAddRecFromPHI to handle simple cases.
5138 ///
5139 /// This function tries to find an AddRec expression for the simplest (yet most
5140 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5141 /// If it fails, createAddRecFromPHI will use a more general, but slow,
5142 /// technique for finding the AddRec expression.
5143 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5144 Value *BEValueV,
5145 Value *StartValueV) {
5146 const Loop *L = LI.getLoopFor(PN->getParent());
5147 assert(L && L->getHeader() == PN->getParent());
5148 assert(BEValueV && StartValueV);
5149
5150 auto BO = MatchBinaryOp(BEValueV, DT);
5151 if (!BO)
5152 return nullptr;
5153
5154 if (BO->Opcode != Instruction::Add)
5155 return nullptr;
5156
5157 const SCEV *Accum = nullptr;
5158 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5159 Accum = getSCEV(BO->RHS);
5160 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5161 Accum = getSCEV(BO->LHS);
5162
5163 if (!Accum)
5164 return nullptr;
5165
5166 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5167 if (BO->IsNUW)
5168 Flags = setFlags(Flags, SCEV::FlagNUW);
5169 if (BO->IsNSW)
5170 Flags = setFlags(Flags, SCEV::FlagNSW);
5171
5172 const SCEV *StartVal = getSCEV(StartValueV);
5173 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5174
5175 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5176
5177 // We can add Flags to the post-inc expression only if we
5178 // know that it is *undefined behavior* for BEValueV to
5179 // overflow.
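// Sketch (illustrative): if BEValueV is 'add nsw i32 %iv, %step' and its
// being poison would necessarily trigger UB on every iteration -- say it
// feeds the address of an unconditional store in the loop -- then overflow
// cannot occur on any iteration that executes, so the flags also hold for
// the post-increment addrec {Start+Step,+,Step}.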
5180 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5181 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5182 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5183
5184 return PHISCEV;
5185 }
5186
5187 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5188 const Loop *L = LI.getLoopFor(PN->getParent());
5189 if (!L || L->getHeader() != PN->getParent())
5190 return nullptr;
5191
5192 // The loop may have multiple entrances or multiple exits; we can analyze
5193 // this phi as an addrec if it has a unique entry value and a unique
5194 // backedge value.
5195 Value *BEValueV = nullptr, *StartValueV = nullptr;
5196 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5197 Value *V = PN->getIncomingValue(i);
5198 if (L->contains(PN->getIncomingBlock(i))) {
5199 if (!BEValueV) {
5200 BEValueV = V;
5201 } else if (BEValueV != V) {
5202 BEValueV = nullptr;
5203 break;
5204 }
5205 } else if (!StartValueV) {
5206 StartValueV = V;
5207 } else if (StartValueV != V) {
5208 StartValueV = nullptr;
5209 break;
5210 }
5211 }
5212 if (!BEValueV || !StartValueV)
5213 return nullptr;
5214
5215 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5216 "PHI node already processed?");
5217
5218 // First, try to find an AddRec expression without creating a fictitious
5219 // symbolic value for PN.
5220 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5221 return S;
5222
5223 // Handle PHI node value symbolically.
5224 const SCEV *SymbolicName = getUnknown(PN);
5225 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5226
5227 // Using this symbolic name for the PHI, analyze the value coming around
5228 // the back-edge.
5229 const SCEV *BEValue = getSCEV(BEValueV);
5230
5231 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5232 // has a special value for the first iteration of the loop.
5233
5234 // If the value coming around the backedge is an add with the symbolic
5235 // value we just inserted, then we found a simple induction variable!
5236 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5237 // If there is a single occurrence of the symbolic value, replace it
5238 // with a recurrence.
5239 unsigned FoundIndex = Add->getNumOperands();
5240 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5241 if (Add->getOperand(i) == SymbolicName)
5242 if (FoundIndex == e) {
5243 FoundIndex = i;
5244 break;
5245 }
5246
5247 if (FoundIndex != Add->getNumOperands()) {
5248 // Create an add with everything but the specified operand.
5249 SmallVector<const SCEV *, 8> Ops;
5250 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5251 if (i != FoundIndex)
5252 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5253 L, *this));
5254 const SCEV *Accum = getAddExpr(Ops);
5255
5256 // This is not a valid addrec if the step amount is varying each
5257 // loop iteration, but is not itself an addrec in this loop.
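// For example (illustrative), a step of {1,+,1}<L> is acceptable for an
// addrec in L (yielding a quadratic recurrence), whereas a step that is an
// addrec of some unrelated loop is not.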
5258 if (isLoopInvariant(Accum, L) ||
5259 (isa<SCEVAddRecExpr>(Accum) &&
5260 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5261 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5262
5263 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5264 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5265 if (BO->IsNUW)
5266 Flags = setFlags(Flags, SCEV::FlagNUW);
5267 if (BO->IsNSW)
5268 Flags = setFlags(Flags, SCEV::FlagNSW);
5269 }
5270 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5271 // If the increment is an inbounds GEP, then we know the address
5272 // space cannot be wrapped around. We cannot make any guarantee
5273 // about signed or unsigned overflow because pointers are
5274 // unsigned but we may have a negative index from the base
5275 // pointer. We can guarantee that no unsigned wrap occurs if the
5276 // indices form a positive value.
5277 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5278 Flags = setFlags(Flags, SCEV::FlagNW);
5279
5280 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5281 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5282 Flags = setFlags(Flags, SCEV::FlagNUW);
5283 }
5284
5285 // We cannot transfer nuw and nsw flags from subtraction
5286 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5287 // for instance.
5288 }
5289
5290 const SCEV *StartVal = getSCEV(StartValueV);
5291 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5292
5293 // Okay, for the entire analysis of this edge we assumed the PHI
5294 // to be symbolic. We now need to go back and purge all of the
5295 // entries for the scalars that use the symbolic expression.
5296 forgetSymbolicName(PN, SymbolicName);
5297 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5298
5299 // We can add Flags to the post-inc expression only if we
5300 // know that it is *undefined behavior* for BEValueV to
5301 // overflow.
5302 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5303 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5304 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5305
5306 return PHISCEV;
5307 }
5308 }
5309 } else {
5310 // Otherwise, this could be a loop like this:
5311 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5312 // In this case, j = {1,+,1} and BEValue is j.
5313 // Because the other in-value of i (0) fits the evolution of BEValue,
5314 // i really is an addrec evolution.
5315 //
5316 // We can generalize this by saying that i is the shifted value of
5317 // BEValue by one iteration:
5318 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5319 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5320 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5321 if (Shifted != getCouldNotCompute() &&
5322 Start != getCouldNotCompute()) {
5323 const SCEV *StartVal = getSCEV(StartValueV);
5324 if (Start == StartVal) {
5325 // Okay, for the entire analysis of this edge we assumed the PHI
5326 // to be symbolic. We now need to go back and purge all of the
5327 // entries for the scalars that use the symbolic expression.
5328 forgetSymbolicName(PN, SymbolicName);
5329 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5330 return Shifted;
5331 }
5332 }
5333 }
5334
5335 // Remove the temporary PHI node SCEV that has been inserted while intending
5336 // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5337 // as it would prevent later (possibly simpler) SCEV expressions from being
5338 // added to the ValueExprMap.
5339 eraseValueFromMap(PN);
5340
5341 return nullptr;
5342 }
5343
5344 // Checks if the SCEV S is available at BB. S is considered available at BB
5345 // if S can be materialized at BB without introducing a fault.
5346 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5347 BasicBlock *BB) {
5348 struct CheckAvailable {
5349 bool TraversalDone = false;
5350 bool Available = true;
5351
5352 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5353 BasicBlock *BB = nullptr;
5354 DominatorTree &DT;
5355
5356 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5357 : L(L), BB(BB), DT(DT) {}
5358
5359 bool setUnavailable() {
5360 TraversalDone = true;
5361 Available = false;
5362 return false;
5363 }
5364
5365 bool follow(const SCEV *S) {
5366 switch (S->getSCEVType()) {
5367 case scConstant:
5368 case scPtrToInt:
5369 case scTruncate:
5370 case scZeroExtend:
5371 case scSignExtend:
5372 case scAddExpr:
5373 case scMulExpr:
5374 case scUMaxExpr:
5375 case scSMaxExpr:
5376 case scUMinExpr:
5377 case scSMinExpr:
5378 // These expressions are available if their operand(s) is/are.
5379 return true;
5380
5381 case scAddRecExpr: {
5382 // We allow add recurrences on the loop that BB is in, or on some
5383 // outer loop. This guarantees availability because the value of the
5384 // add recurrence at BB is simply the "current" value of the induction
5385 // variable. We can relax this in the future; for instance an add
5386 // recurrence on a sibling dominating loop is also available at BB.
5387 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5388 if (L && (ARLoop == L || ARLoop->contains(L)))
5389 return true;
5390
5391 return setUnavailable();
5392 }
5393
5394 case scUnknown: {
5395 // For SCEVUnknown, we check for simple dominance.
5396 const auto *SU = cast<SCEVUnknown>(S);
5397 Value *V = SU->getValue();
5398
5399 if (isa<Argument>(V))
5400 return false;
5401
5402 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5403 return false;
5404
5405 return setUnavailable();
5406 }
5407
5408 case scUDivExpr:
5409 case scCouldNotCompute:
5410 // We do not try to be smart about these at all.
5411 return setUnavailable();
5412 }
5413 llvm_unreachable("Unknown SCEV kind!");
5414 }
5415
5416 bool isDone() { return TraversalDone; }
5417 };
5418
5419 CheckAvailable CA(L, BB, DT);
5420 SCEVTraversal<CheckAvailable> ST(CA);
5421
5422 ST.visitAll(S);
5423 return CA.Available;
5424 }
5425
5426 // Try to match a control flow sequence that branches out at BI and merges back
5427 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5428 // match.
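// Note that the phi's incoming uses may correspond to the branch successors
// in either order, so both edge/use pairings are checked below before giving
// up.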
5429 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5430 Value *&C, Value *&LHS, Value *&RHS) { 5431 C = BI->getCondition(); 5432 5433 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5434 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5435 5436 if (!LeftEdge.isSingleEdge()) 5437 return false; 5438 5439 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5440 5441 Use &LeftUse = Merge->getOperandUse(0); 5442 Use &RightUse = Merge->getOperandUse(1); 5443 5444 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5445 LHS = LeftUse; 5446 RHS = RightUse; 5447 return true; 5448 } 5449 5450 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5451 LHS = RightUse; 5452 RHS = LeftUse; 5453 return true; 5454 } 5455 5456 return false; 5457 } 5458 5459 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5460 auto IsReachable = 5461 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5462 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5463 const Loop *L = LI.getLoopFor(PN->getParent()); 5464 5465 // We don't want to break LCSSA, even in a SCEV expression tree. 5466 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5467 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5468 return nullptr; 5469 5470 // Try to match 5471 // 5472 // br %cond, label %left, label %right 5473 // left: 5474 // br label %merge 5475 // right: 5476 // br label %merge 5477 // merge: 5478 // V = phi [ %x, %left ], [ %y, %right ] 5479 // 5480 // as "select %cond, %x, %y" 5481 5482 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5483 assert(IDom && "At least the entry block should dominate PN"); 5484 5485 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5486 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5487 5488 if (BI && BI->isConditional() && 5489 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5490 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5491 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5492 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5493 } 5494 5495 return nullptr; 5496 } 5497 5498 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5499 if (const SCEV *S = createAddRecFromPHI(PN)) 5500 return S; 5501 5502 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5503 return S; 5504 5505 // If the PHI has a single incoming value, follow that value, unless the 5506 // PHI's incoming blocks are in a different loop, in which case doing so 5507 // risks breaking LCSSA form. Instcombine would normally zap these, but 5508 // it doesn't have DominatorTree information, so it may miss cases. 5509 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5510 if (LI.replacementPreservesLCSSAForm(PN, V)) 5511 return getSCEV(V); 5512 5513 // If it's not a loop phi, we can't handle it yet. 5514 return getUnknown(PN); 5515 } 5516 5517 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I, 5518 Value *Cond, 5519 Value *TrueVal, 5520 Value *FalseVal) { 5521 // Handle "constant" branch or select. This can occur for instance when a 5522 // loop pass transforms an inner loop and moves on to process the outer loop. 5523 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 5524 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 5525 5526 // Try to match some simple smax or umax patterns. 
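// For example (illustrative IR):
//   %cmp = icmp sgt i32 %a, %b
//   %sel = select i1 %cmp, i32 %a, i32 %b
// is smax(%a, %b), and the offset form
//   select (icmp sgt %a, %b), (add %a, %x), (add %b, %x)
// folds to smax(%a, %b) + %x, because both arms differ from the compared
// values by the same amount.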
5527 auto *ICI = dyn_cast<ICmpInst>(Cond); 5528 if (!ICI) 5529 return getUnknown(I); 5530 5531 Value *LHS = ICI->getOperand(0); 5532 Value *RHS = ICI->getOperand(1); 5533 5534 switch (ICI->getPredicate()) { 5535 case ICmpInst::ICMP_SLT: 5536 case ICmpInst::ICMP_SLE: 5537 case ICmpInst::ICMP_ULT: 5538 case ICmpInst::ICMP_ULE: 5539 std::swap(LHS, RHS); 5540 LLVM_FALLTHROUGH; 5541 case ICmpInst::ICMP_SGT: 5542 case ICmpInst::ICMP_SGE: 5543 case ICmpInst::ICMP_UGT: 5544 case ICmpInst::ICMP_UGE: 5545 // a > b ? a+x : b+x -> max(a, b)+x 5546 // a > b ? b+x : a+x -> min(a, b)+x 5547 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 5548 bool Signed = ICI->isSigned(); 5549 const SCEV *LS = Signed ? getNoopOrSignExtend(getSCEV(LHS), I->getType()) 5550 : getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5551 const SCEV *RS = Signed ? getNoopOrSignExtend(getSCEV(RHS), I->getType()) 5552 : getNoopOrZeroExtend(getSCEV(RHS), I->getType()); 5553 const SCEV *LA = getSCEV(TrueVal); 5554 const SCEV *RA = getSCEV(FalseVal); 5555 const SCEV *LDiff = getMinusSCEV(LA, LS); 5556 const SCEV *RDiff = getMinusSCEV(RA, RS); 5557 if (LDiff == RDiff) 5558 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 5559 LDiff); 5560 LDiff = getMinusSCEV(LA, RS); 5561 RDiff = getMinusSCEV(RA, LS); 5562 if (LDiff == RDiff) 5563 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 5564 LDiff); 5565 } 5566 break; 5567 case ICmpInst::ICMP_NE: 5568 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 5569 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5570 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5571 const SCEV *One = getOne(I->getType()); 5572 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5573 const SCEV *LA = getSCEV(TrueVal); 5574 const SCEV *RA = getSCEV(FalseVal); 5575 const SCEV *LDiff = getMinusSCEV(LA, LS); 5576 const SCEV *RDiff = getMinusSCEV(RA, One); 5577 if (LDiff == RDiff) 5578 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5579 } 5580 break; 5581 case ICmpInst::ICMP_EQ: 5582 // n == 0 ? 1+x : n+x -> umax(n, 1)+x 5583 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 5584 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 5585 const SCEV *One = getOne(I->getType()); 5586 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 5587 const SCEV *LA = getSCEV(TrueVal); 5588 const SCEV *RA = getSCEV(FalseVal); 5589 const SCEV *LDiff = getMinusSCEV(LA, One); 5590 const SCEV *RDiff = getMinusSCEV(RA, LS); 5591 if (LDiff == RDiff) 5592 return getAddExpr(getUMaxExpr(One, LS), LDiff); 5593 } 5594 break; 5595 default: 5596 break; 5597 } 5598 5599 return getUnknown(I); 5600 } 5601 5602 /// Expand GEP instructions into add and multiply operations. This allows them 5603 /// to be analyzed by regular SCEV code. 5604 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 5605 // Don't attempt to analyze GEPs over unsized objects. 
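// (e.g. a GEP whose source element type is an opaque struct has no
// computable offsets)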
5606 if (!GEP->getSourceElementType()->isSized()) 5607 return getUnknown(GEP); 5608 5609 SmallVector<const SCEV *, 4> IndexExprs; 5610 for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index) 5611 IndexExprs.push_back(getSCEV(*Index)); 5612 return getGEPExpr(GEP, IndexExprs); 5613 } 5614 5615 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 5616 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5617 return C->getAPInt().countTrailingZeros(); 5618 5619 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 5620 return GetMinTrailingZeros(I->getOperand()); 5621 5622 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 5623 return std::min(GetMinTrailingZeros(T->getOperand()), 5624 (uint32_t)getTypeSizeInBits(T->getType())); 5625 5626 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 5627 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5628 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5629 ? getTypeSizeInBits(E->getType()) 5630 : OpRes; 5631 } 5632 5633 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 5634 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 5635 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 5636 ? getTypeSizeInBits(E->getType()) 5637 : OpRes; 5638 } 5639 5640 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 5641 // The result is the min of all operands results. 5642 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5643 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5644 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5645 return MinOpRes; 5646 } 5647 5648 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 5649 // The result is the sum of all operands results. 5650 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 5651 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 5652 for (unsigned i = 1, e = M->getNumOperands(); 5653 SumOpRes != BitWidth && i != e; ++i) 5654 SumOpRes = 5655 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 5656 return SumOpRes; 5657 } 5658 5659 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 5660 // The result is the min of all operands results. 5661 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 5662 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 5663 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 5664 return MinOpRes; 5665 } 5666 5667 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 5668 // The result is the min of all operands results. 5669 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5670 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5671 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5672 return MinOpRes; 5673 } 5674 5675 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 5676 // The result is the min of all operands results. 5677 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 5678 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 5679 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 5680 return MinOpRes; 5681 } 5682 5683 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 5684 // For a SCEVUnknown, ask ValueTracking. 
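// e.g. (illustrative) if U is '%v = and i32 %x, -8', known-bits analysis
// proves at least three trailing zero bits even though %v has no SCEV
// structure of its own.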
5685 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5686 return Known.countMinTrailingZeros();
5687 }
5688
5689 // SCEVUDivExpr
5690 return 0;
5691 }
5692
5693 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
5694 auto I = MinTrailingZerosCache.find(S);
5695 if (I != MinTrailingZerosCache.end())
5696 return I->second;
5697
5698 uint32_t Result = GetMinTrailingZerosImpl(S);
5699 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5700 assert(InsertPair.second && "Should insert a new key");
5701 return InsertPair.first->second;
5702 }
5703
5704 /// Helper method to assign a range to V from metadata present in the IR.
5705 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
5706 if (Instruction *I = dyn_cast<Instruction>(V))
5707 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
5708 return getConstantRangeFromMetadata(*MD);
5709
5710 return None;
5711 }
5712
5713 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
5714 SCEV::NoWrapFlags Flags) {
5715 if (AddRec->getNoWrapFlags(Flags) != Flags) {
5716 AddRec->setNoWrapFlags(Flags);
5717 UnsignedRanges.erase(AddRec);
5718 SignedRanges.erase(AddRec);
5719 }
5720 }
5721
5722 ConstantRange ScalarEvolution::
5723 getRangeForUnknownRecurrence(const SCEVUnknown *U) {
5724 const DataLayout &DL = getDataLayout();
5725
5726 unsigned BitWidth = getTypeSizeInBits(U->getType());
5727 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
5728
5729 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
5730 // use information about the trip count to improve our available range. Note
5731 // that the trip count independent cases are already handled by known bits.
5732 // WARNING: The definition of recurrence used here is subtly different from
5733 // the one used by AddRec (and thus most of this file). Step is allowed to
5734 // be arbitrarily loop varying here, where AddRec allows only loop invariant
5735 // and other addrecs in the same loop (for non-affine addrecs). The code
5736 // below intentionally handles the case where step is not loop invariant.
5737 auto *P = dyn_cast<PHINode>(U->getValue());
5738 if (!P)
5739 return FullSet;
5740
5741 // Make sure that no Phi input comes from an unreachable block. Otherwise,
5742 // even the values that are not available in these blocks may come from them,
5743 // and this leads to a false-positive recurrence test.
5744 for (auto *Pred : predecessors(P->getParent()))
5745 if (!DT.isReachableFromEntry(Pred))
5746 return FullSet;
5747
5748 BinaryOperator *BO;
5749 Value *Start, *Step;
5750 if (!matchSimpleRecurrence(P, BO, Start, Step))
5751 return FullSet;
5752
5753 // If we found a recurrence in reachable code, we must be in a loop. Note
5754 // that BO might be in some subloop of L, and that's completely okay.
5755 auto *L = LI.getLoopFor(P->getParent());
5756 assert(L && L->getHeader() == P->getParent());
5757 if (!L->contains(BO->getParent()))
5758 // NOTE: This bailout should be an assert instead. However, asserting
5759 // the condition here exposes a case where LoopFusion is querying SCEV
5760 // with malformed loop information in the midst of the transform.
5761 // There doesn't appear to be an obvious fix, so for the moment we bail
5762 // out until the caller issue can be fixed. PR49566 tracks the bug.
5763 return FullSet; 5764 5765 // TODO: Extend to other opcodes such as mul, and div 5766 switch (BO->getOpcode()) { 5767 default: 5768 return FullSet; 5769 case Instruction::AShr: 5770 case Instruction::LShr: 5771 case Instruction::Shl: 5772 break; 5773 }; 5774 5775 if (BO->getOperand(0) != P) 5776 // TODO: Handle the power function forms some day. 5777 return FullSet; 5778 5779 unsigned TC = getSmallConstantMaxTripCount(L); 5780 if (!TC || TC >= BitWidth) 5781 return FullSet; 5782 5783 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 5784 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 5785 assert(KnownStart.getBitWidth() == BitWidth && 5786 KnownStep.getBitWidth() == BitWidth); 5787 5788 // Compute total shift amount, being careful of overflow and bitwidths. 5789 auto MaxShiftAmt = KnownStep.getMaxValue(); 5790 APInt TCAP(BitWidth, TC-1); 5791 bool Overflow = false; 5792 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 5793 if (Overflow) 5794 return FullSet; 5795 5796 switch (BO->getOpcode()) { 5797 default: 5798 llvm_unreachable("filtered out above"); 5799 case Instruction::AShr: { 5800 // For each ashr, three cases: 5801 // shift = 0 => unchanged value 5802 // saturation => 0 or -1 5803 // other => a value closer to zero (of the same sign) 5804 // Thus, the end value is closer to zero than the start. 5805 auto KnownEnd = KnownBits::ashr(KnownStart, 5806 KnownBits::makeConstant(TotalShift)); 5807 if (KnownStart.isNonNegative()) 5808 // Analogous to lshr (simply not yet canonicalized) 5809 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5810 KnownStart.getMaxValue() + 1); 5811 if (KnownStart.isNegative()) 5812 // End >=u Start && End <=s Start 5813 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 5814 KnownEnd.getMaxValue() + 1); 5815 break; 5816 } 5817 case Instruction::LShr: { 5818 // For each lshr, three cases: 5819 // shift = 0 => unchanged value 5820 // saturation => 0 5821 // other => a smaller positive number 5822 // Thus, the low end of the unsigned range is the last value produced. 5823 auto KnownEnd = KnownBits::lshr(KnownStart, 5824 KnownBits::makeConstant(TotalShift)); 5825 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 5826 KnownStart.getMaxValue() + 1); 5827 } 5828 case Instruction::Shl: { 5829 // Iff no bits are shifted out, value increases on every shift. 5830 auto KnownEnd = KnownBits::shl(KnownStart, 5831 KnownBits::makeConstant(TotalShift)); 5832 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 5833 return ConstantRange(KnownStart.getMinValue(), 5834 KnownEnd.getMaxValue() + 1); 5835 break; 5836 } 5837 }; 5838 return FullSet; 5839 } 5840 5841 /// Determine the range for a particular SCEV. If SignHint is 5842 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 5843 /// with a "cleaner" unsigned (resp. signed) representation. 5844 const ConstantRange & 5845 ScalarEvolution::getRangeRef(const SCEV *S, 5846 ScalarEvolution::RangeSignHint SignHint) { 5847 DenseMap<const SCEV *, ConstantRange> &Cache = 5848 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 5849 : SignedRanges; 5850 ConstantRange::PreferredRangeType RangeType = 5851 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 5852 ? ConstantRange::Unsigned : ConstantRange::Signed; 5853 5854 // See if we've computed this range already. 
5855 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 5856 if (I != Cache.end()) 5857 return I->second; 5858 5859 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 5860 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 5861 5862 unsigned BitWidth = getTypeSizeInBits(S->getType()); 5863 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 5864 using OBO = OverflowingBinaryOperator; 5865 5866 // If the value has known zeros, the maximum value will have those known zeros 5867 // as well. 5868 uint32_t TZ = GetMinTrailingZeros(S); 5869 if (TZ != 0) { 5870 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 5871 ConservativeResult = 5872 ConstantRange(APInt::getMinValue(BitWidth), 5873 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 5874 else 5875 ConservativeResult = ConstantRange( 5876 APInt::getSignedMinValue(BitWidth), 5877 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 5878 } 5879 5880 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 5881 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 5882 unsigned WrapType = OBO::AnyWrap; 5883 if (Add->hasNoSignedWrap()) 5884 WrapType |= OBO::NoSignedWrap; 5885 if (Add->hasNoUnsignedWrap()) 5886 WrapType |= OBO::NoUnsignedWrap; 5887 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 5888 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 5889 WrapType, RangeType); 5890 return setRange(Add, SignHint, 5891 ConservativeResult.intersectWith(X, RangeType)); 5892 } 5893 5894 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 5895 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 5896 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 5897 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 5898 return setRange(Mul, SignHint, 5899 ConservativeResult.intersectWith(X, RangeType)); 5900 } 5901 5902 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 5903 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint); 5904 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 5905 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint)); 5906 return setRange(SMax, SignHint, 5907 ConservativeResult.intersectWith(X, RangeType)); 5908 } 5909 5910 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 5911 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint); 5912 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 5913 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint)); 5914 return setRange(UMax, SignHint, 5915 ConservativeResult.intersectWith(X, RangeType)); 5916 } 5917 5918 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) { 5919 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint); 5920 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i) 5921 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint)); 5922 return setRange(SMin, SignHint, 5923 ConservativeResult.intersectWith(X, RangeType)); 5924 } 5925 5926 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) { 5927 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint); 5928 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i) 5929 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint)); 5930 return setRange(UMin, SignHint, 5931 ConservativeResult.intersectWith(X, RangeType)); 5932 } 5933 5934 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 5935 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 5936 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 5937 return setRange(UDiv, 
SignHint,
5938 ConservativeResult.intersectWith(X.udiv(Y), RangeType));
5939 }
5940
5941 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
5942 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
5943 return setRange(ZExt, SignHint,
5944 ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
5945 RangeType));
5946 }
5947
5948 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
5949 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
5950 return setRange(SExt, SignHint,
5951 ConservativeResult.intersectWith(X.signExtend(BitWidth),
5952 RangeType));
5953 }
5954
5955 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
5956 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
5957 return setRange(PtrToInt, SignHint, X);
5958 }
5959
5960 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
5961 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
5962 return setRange(Trunc, SignHint,
5963 ConservativeResult.intersectWith(X.truncate(BitWidth),
5964 RangeType));
5965 }
5966
5967 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
5968 // If there's no unsigned wrap, the value will never be less than its
5969 // initial value.
5970 if (AddRec->hasNoUnsignedWrap()) {
5971 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
5972 if (!UnsignedMinValue.isNullValue())
5973 ConservativeResult = ConservativeResult.intersectWith(
5974 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
5975 }
5976
5977 // If there's no signed wrap, and all the operands except the initial value
5978 // have the same sign or zero, the value won't ever be:
5979 // 1: smaller than the initial value if the operands are non-negative,
5980 // 2: bigger than the initial value if the operands are non-positive.
5981 // In both cases, the value cannot cross the signed min/max boundary.
5982 if (AddRec->hasNoSignedWrap()) {
5983 bool AllNonNeg = true;
5984 bool AllNonPos = true;
5985 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
5986 if (!isKnownNonNegative(AddRec->getOperand(i)))
5987 AllNonNeg = false;
5988 if (!isKnownNonPositive(AddRec->getOperand(i)))
5989 AllNonPos = false;
5990 }
5991 if (AllNonNeg)
5992 ConservativeResult = ConservativeResult.intersectWith(
5993 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
5994 APInt::getSignedMinValue(BitWidth)),
5995 RangeType);
5996 else if (AllNonPos)
5997 ConservativeResult = ConservativeResult.intersectWith(
5998 ConstantRange::getNonEmpty(
5999 APInt::getSignedMinValue(BitWidth),
6000 getSignedRangeMax(AddRec->getStart()) + 1),
6001 RangeType);
6002 }
6003
6004 // TODO: non-affine addrec
6005 if (AddRec->isAffine()) {
6006 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
6007 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
6008 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
6009 auto RangeFromAffine = getRangeForAffineAR(
6010 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6011 BitWidth);
6012 ConservativeResult =
6013 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6014
6015 auto RangeFromFactoring = getRangeViaFactoring(
6016 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6017 BitWidth);
6018 ConservativeResult =
6019 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6020 }
6021
6022 // Now try symbolic BE count and more powerful methods.
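// (An illustrative example, not from the source: for {0,+,1}<nuw><nsw> in a
// loop whose constant max backedge-taken count is 9, the affine path above
// already yields [0, 10); the symbolic path below can help when only a
// symbolic bound such as %n is known.)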
6023 if (UseExpensiveRangeSharpening) {
6024 const SCEV *SymbolicMaxBECount =
6025 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
6026 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6027 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6028 AddRec->hasNoSelfWrap()) {
6029 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6030 AddRec, SymbolicMaxBECount, BitWidth, SignHint);
6031 ConservativeResult =
6032 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6033 }
6034 }
6035 }
6036
6037 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6038 }
6039
6040 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6041
6042 // Check if the IR explicitly contains !range metadata.
6043 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
6044 if (MDRange.hasValue())
6045 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
6046 RangeType);
6047
6048 // Use facts about recurrences in the underlying IR. Note that add
6049 // recurrences are AddRecExprs and thus don't hit this path. This
6050 // primarily handles shift recurrences.
6051 auto CR = getRangeForUnknownRecurrence(U);
6052 ConservativeResult = ConservativeResult.intersectWith(CR);
6053
6054 // See if ValueTracking can give us a useful range.
6055 const DataLayout &DL = getDataLayout();
6056 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6057 if (Known.getBitWidth() != BitWidth)
6058 Known = Known.zextOrTrunc(BitWidth);
6059
6060 // ValueTracking may be able to compute a tighter result for the number of
6061 // sign bits than for the value of those sign bits.
6062 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6063 if (U->getType()->isPointerTy()) {
6064 // If the pointer size is larger than the index size, this can cause
6065 // NS to be larger than BitWidth. So compensate for this.
6066 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
6067 int ptrIdxDiff = ptrSize - BitWidth;
6068 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
6069 NS -= ptrIdxDiff;
6070 }
6071
6072 if (NS > 1) {
6073 // If we know any of the sign bits, we know all of the sign bits.
6074 if (!Known.Zero.getHiBits(NS).isNullValue())
6075 Known.Zero.setHighBits(NS);
6076 if (!Known.One.getHiBits(NS).isNullValue())
6077 Known.One.setHighBits(NS);
6078 }
6079
6080 if (Known.getMinValue() != Known.getMaxValue() + 1)
6081 ConservativeResult = ConservativeResult.intersectWith(
6082 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
6083 RangeType);
6084 if (NS > 1)
6085 ConservativeResult = ConservativeResult.intersectWith(
6086 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
6087 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
6088 RangeType);
6089
6090 // The range of a Phi is a subset of the union of the ranges of its inputs.
6091 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
6092 // Make sure that we do not recurse into Phis that form a cycle.
6093 if (PendingPhiRanges.insert(Phi).second) {
6094 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
6095 for (auto &Op : Phi->operands()) {
6096 auto OpRange = getRangeRef(getSCEV(Op), SignHint);
6097 RangeFromOps = RangeFromOps.unionWith(OpRange);
6098 // No point in continuing if we already have a full set.
6099 if (RangeFromOps.isFullSet())
6100 break;
6101 }
6102 ConservativeResult =
6103 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6104 bool Erased = PendingPhiRanges.erase(Phi);
6105 assert(Erased && "Failed to erase Phi properly?");
6106 (void) Erased;
6107 }
6108 }
6109
6110 return setRange(U, SignHint, std::move(ConservativeResult));
6111 }
6112
6113 return setRange(S, SignHint, std::move(ConservativeResult));
6114 }
6115
6116 // Given a StartRange, Step and MaxBECount for an expression, compute a range
6117 // of values that the expression can take. Initially, the expression has a
6118 // value from StartRange and then is changed by Step up to MaxBECount times.
6119 // The Signed argument defines whether we treat Step as signed or unsigned.
6120 static ConstantRange getRangeForAffineARHelper(APInt Step,
6121 const ConstantRange &StartRange,
6122 const APInt &MaxBECount,
6123 unsigned BitWidth, bool Signed) {
6124 // If either Step or MaxBECount is 0, then the expression won't change, and we
6125 // just need to return the initial range.
6126 if (Step == 0 || MaxBECount == 0)
6127 return StartRange;
6128
6129 // If we don't know anything about the initial value (i.e. StartRange is
6130 // FullRange), then we don't know anything about the final range either.
6131 // Return FullRange.
6132 if (StartRange.isFullSet())
6133 return ConstantRange::getFull(BitWidth);
6134
6135 // If Step is signed and negative, then we use its absolute value, but we also
6136 // note that we're moving in the opposite direction.
6137 bool Descending = Signed && Step.isNegative();
6138
6139 if (Signed)
6140 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6141 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6142 // These equations hold true due to the well-defined wrap-around behavior
6143 // of APInt.
6144 Step = Step.abs();
6145
6146 // Check if Offset is more than the full span of BitWidth. If it is, the
6147 // expression is guaranteed to overflow.
6148 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6149 return ConstantRange::getFull(BitWidth);
6150
6151 // Offset is by how much the expression can change. The checks above
6152 // guarantee no overflow here.
6153 APInt Offset = Step * MaxBECount;
6154
6155 // The minimum value of the final range will match the minimum value of
6156 // StartRange if the expression is increasing, and will be decreased by
6157 // Offset otherwise. The maximum value of the final range will match the
6158 // maximum value of StartRange if the expression is decreasing, and will be
6159 // increased by Offset otherwise.
6160 APInt StartLower = StartRange.getLower();
6160 APInt StartUpper = StartRange.getUpper() - 1;
6161 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6162 : (StartUpper + std::move(Offset));
6163
6164 // It's possible that the new minimum/maximum value will fall into the initial
6165 // range (due to wrap around). This means that the expression can take any
6166 // value in this bitwidth, and we have to return the full range.
6167 if (StartRange.contains(MovedBoundary))
6168 return ConstantRange::getFull(BitWidth);
6169
6170 APInt NewLower =
6171 Descending ? std::move(MovedBoundary) : std::move(StartLower);
6172 APInt NewUpper =
6173 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6174 NewUpper += 1;
6175
6176 // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
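// (A worked example with invented values: in i8, StartRange = [0, 10),
// Step = 2, MaxBECount = 3, Signed = false: Offset = 6, MovedBoundary =
// 9 + 6 = 15, which is not in [0, 10), so the result is [0, 16).)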
6177 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
6178 }
6179
6180 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6181 const SCEV *Step,
6182 const SCEV *MaxBECount,
6183 unsigned BitWidth) {
6184 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6185 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6186 "Precondition!");
6187
6188 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6189 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6190
6191 // First, consider the step signed.
6192 ConstantRange StartSRange = getSignedRange(Start);
6193 ConstantRange StepSRange = getSignedRange(Step);
6194
6195 // If Step can be both positive and negative, we need to find ranges for the
6196 // maximum absolute step values in both directions and union them.
6197 ConstantRange SR =
6198 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6199 MaxBECountValue, BitWidth, /* Signed = */ true);
6200 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6201 StartSRange, MaxBECountValue,
6202 BitWidth, /* Signed = */ true));
6203
6204 // Next, consider the step unsigned.
6205 ConstantRange UR = getRangeForAffineARHelper(
6206 getUnsignedRangeMax(Step), getUnsignedRange(Start),
6207 MaxBECountValue, BitWidth, /* Signed = */ false);
6208
6209 // Finally, intersect the signed and unsigned ranges.
6210 return SR.intersectWith(UR, ConstantRange::Smallest);
6211 }
6212
6213 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6214 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6215 ScalarEvolution::RangeSignHint SignHint) {
6216 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
6217 assert(AddRec->hasNoSelfWrap() &&
6218 "This only works for non-self-wrapping AddRecs!");
6219 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6220 const SCEV *Step = AddRec->getStepRecurrence(*this);
6221 // Only deal with a constant step to save compile time.
6222 if (!isa<SCEVConstant>(Step))
6223 return ConstantRange::getFull(BitWidth);
6224 // Let's make sure that we can prove that we do not self-wrap during
6225 // MaxBECount iterations. We need this because MaxBECount is a maximum
6226 // iteration count estimate, and we might have inferred nw from some exit
6227 // for which we do not know the max exit count (or via other side reasoning).
6228 // TODO: Turn this into an assert at some point.
6229 if (getTypeSizeInBits(MaxBECount->getType()) >
6230 getTypeSizeInBits(AddRec->getType()))
6231 return ConstantRange::getFull(BitWidth);
6232 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6233 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6234 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6235 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6236 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6237 MaxItersWithoutWrap))
6238 return ConstantRange::getFull(BitWidth);
6239
6240 ICmpInst::Predicate LEPred =
6241 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6242 ICmpInst::Predicate GEPred =
6243 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6244 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6245
6246 // We know that there is no self-wrap. Let's take Start and End values and
6247 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6248 // the iteration.
They either lie inside the range [Min(Start, End), 6249 // Max(Start, End)] or outside it: 6250 // 6251 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax; 6252 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax; 6253 // 6254 // No self wrap flag guarantees that the intermediate values cannot be BOTH 6255 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that 6256 // knowledge, let's try to prove that we are dealing with Case 1. It is so if 6257 // Start <= End and step is positive, or Start >= End and step is negative. 6258 const SCEV *Start = AddRec->getStart(); 6259 ConstantRange StartRange = getRangeRef(Start, SignHint); 6260 ConstantRange EndRange = getRangeRef(End, SignHint); 6261 ConstantRange RangeBetween = StartRange.unionWith(EndRange); 6262 // If they already cover full iteration space, we will know nothing useful 6263 // even if we prove what we want to prove. 6264 if (RangeBetween.isFullSet()) 6265 return RangeBetween; 6266 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax). 6267 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet() 6268 : RangeBetween.isWrappedSet(); 6269 if (IsWrappedSet) 6270 return ConstantRange::getFull(BitWidth); 6271 6272 if (isKnownPositive(Step) && 6273 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6274 return RangeBetween; 6275 else if (isKnownNegative(Step) && 6276 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6277 return RangeBetween; 6278 return ConstantRange::getFull(BitWidth); 6279 } 6280 6281 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6282 const SCEV *Step, 6283 const SCEV *MaxBECount, 6284 unsigned BitWidth) { 6285 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6286 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6287 6288 struct SelectPattern { 6289 Value *Condition = nullptr; 6290 APInt TrueValue; 6291 APInt FalseValue; 6292 6293 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6294 const SCEV *S) { 6295 Optional<unsigned> CastOp; 6296 APInt Offset(BitWidth, 0); 6297 6298 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6299 "Should be!"); 6300 6301 // Peel off a constant offset: 6302 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6303 // In the future we could consider being smarter here and handle 6304 // {Start+Step,+,Step} too. 
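// (An illustrative sketch with invented values: for
// S = 5 + (sext i8 (select %c, i8 1, i8 -1) to i32), we peel Offset = 5,
// then CastOp = scSignExtend, match the select, and after re-applying the
// cast and the offset get TrueValue = 6 and FalseValue = 4.)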
6305 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6306 return; 6307 6308 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6309 S = SA->getOperand(1); 6310 } 6311 6312 // Peel off a cast operation 6313 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6314 CastOp = SCast->getSCEVType(); 6315 S = SCast->getOperand(); 6316 } 6317 6318 using namespace llvm::PatternMatch; 6319 6320 auto *SU = dyn_cast<SCEVUnknown>(S); 6321 const APInt *TrueVal, *FalseVal; 6322 if (!SU || 6323 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6324 m_APInt(FalseVal)))) { 6325 Condition = nullptr; 6326 return; 6327 } 6328 6329 TrueValue = *TrueVal; 6330 FalseValue = *FalseVal; 6331 6332 // Re-apply the cast we peeled off earlier 6333 if (CastOp.hasValue()) 6334 switch (*CastOp) { 6335 default: 6336 llvm_unreachable("Unknown SCEV cast type!"); 6337 6338 case scTruncate: 6339 TrueValue = TrueValue.trunc(BitWidth); 6340 FalseValue = FalseValue.trunc(BitWidth); 6341 break; 6342 case scZeroExtend: 6343 TrueValue = TrueValue.zext(BitWidth); 6344 FalseValue = FalseValue.zext(BitWidth); 6345 break; 6346 case scSignExtend: 6347 TrueValue = TrueValue.sext(BitWidth); 6348 FalseValue = FalseValue.sext(BitWidth); 6349 break; 6350 } 6351 6352 // Re-apply the constant offset we peeled off earlier 6353 TrueValue += Offset; 6354 FalseValue += Offset; 6355 } 6356 6357 bool isRecognized() { return Condition != nullptr; } 6358 }; 6359 6360 SelectPattern StartPattern(*this, BitWidth, Start); 6361 if (!StartPattern.isRecognized()) 6362 return ConstantRange::getFull(BitWidth); 6363 6364 SelectPattern StepPattern(*this, BitWidth, Step); 6365 if (!StepPattern.isRecognized()) 6366 return ConstantRange::getFull(BitWidth); 6367 6368 if (StartPattern.Condition != StepPattern.Condition) { 6369 // We don't handle this case today; but we could, by considering four 6370 // possibilities below instead of two. I'm not sure if there are cases where 6371 // that will help over what getRange already does, though. 6372 return ConstantRange::getFull(BitWidth); 6373 } 6374 6375 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6376 // construct arbitrary general SCEV expressions here. This function is called 6377 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6378 // say) can end up caching a suboptimal value. 6379 6380 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6381 // C2352 and C2512 (otherwise it isn't needed). 6382 6383 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6384 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6385 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6386 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6387 6388 ConstantRange TrueRange = 6389 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6390 ConstantRange FalseRange = 6391 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6392 6393 return TrueRange.unionWith(FalseRange); 6394 } 6395 6396 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6397 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6398 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6399 6400 // Return early if there are no flags to propagate to the SCEV. 
6401 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6402 if (BinOp->hasNoUnsignedWrap())
6403 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
6404 if (BinOp->hasNoSignedWrap())
6405 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
6406 if (Flags == SCEV::FlagAnyWrap)
6407 return SCEV::FlagAnyWrap;
6408
6409 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
6410 }
6411
6412 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
6413 // Here we check that I is in the header of the innermost loop containing I,
6414 // since we only deal with instructions in the loop header. The actual loop we
6415 // need to check later will come from an add recurrence, but getting that
6416 // requires computing the SCEV of the operands, which can be expensive. This
6417 // check can be done cheaply to rule out some cases early.
6418 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
6419 if (InnermostContainingLoop == nullptr ||
6420 InnermostContainingLoop->getHeader() != I->getParent())
6421 return false;
6422
6423 // Only proceed if we can prove that I does not yield poison.
6424 if (!programUndefinedIfPoison(I))
6425 return false;
6426
6427 // At this point we know that if I is executed, then it does not wrap
6428 // according to at least one of NSW or NUW. If I is not executed, then we do
6429 // not know if the calculation that I represents would wrap. Multiple
6430 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
6431 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
6432 // derived from other instructions that map to the same SCEV. We cannot make
6433 // that guarantee for cases where I is not executed. So we need to find the
6434 // loop that I is considered in relation to and prove that I is executed for
6435 // every iteration of that loop. That implies that the value that I
6436 // calculates does not wrap anywhere in the loop, so then we can apply the
6437 // flags to the SCEV.
6438 //
6439 // We check isLoopInvariant to disambiguate in case we are adding recurrences
6440 // from different loops, so that we know which loop to prove that I is
6441 // executed in.
6442 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
6443 // I could be an extractvalue from a call to an overflow intrinsic.
6444 // TODO: We can do better here in some cases.
6445 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
6446 return false;
6447 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
6448 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
6449 bool AllOtherOpsLoopInvariant = true;
6450 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
6451 ++OtherOpIndex) {
6452 if (OtherOpIndex != OpIndex) {
6453 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
6454 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
6455 AllOtherOpsLoopInvariant = false;
6456 break;
6457 }
6458 }
6459 }
6460 if (AllOtherOpsLoopInvariant &&
6461 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
6462 return true;
6463 }
6464 }
6465 return false;
6466 }
6467
6468 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
6469 // If we know that \c I can never be poison, period, then that's enough.
6470 if (isSCEVExprNeverPoison(I))
6471 return true;
6472
6473 // For an add recurrence specifically, we assume that infinite loops without
6474 // side effects are undefined behavior, and then reason as follows:
6475 //
6476 // If the add recurrence is poison in any iteration, it is poison on all
6477 // future iterations (since incrementing poison yields poison). If the result
6478 // of the add recurrence is fed into the loop latch condition and the loop
6479 // does not contain any throws or exiting blocks other than the latch, we now
6480 // have the ability to "choose" whether the backedge is taken or not (by
6481 // choosing a sufficiently evil value for the poison feeding into the branch)
6482 // for every iteration including and after the one in which \p I first became
6483 // poison. There are two possibilities (let's call the iteration in which \p
6484 // I first became poison K):
6485 //
6486 // 1. In the set of iterations including and after K, the loop body executes
6487 // no side effects. In this case executing the backedge an infinite number
6488 // of times will yield undefined behavior.
6489 //
6490 // 2. In the set of iterations including and after K, the loop body executes
6491 // at least one side effect. In this case, that specific instance of side
6492 // effect is control dependent on poison, which also yields undefined
6493 // behavior.
6494
6495 auto *ExitingBB = L->getExitingBlock();
6496 auto *LatchBB = L->getLoopLatch();
6497 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6498 return false;
6499
6500 SmallPtrSet<const Instruction *, 16> Pushed;
6501 SmallVector<const Instruction *, 8> PoisonStack;
6502
6503 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6504 // things that are known to be poison under that assumption go on the
6505 // PoisonStack.
6506 Pushed.insert(I);
6507 PoisonStack.push_back(I);
6508
6509 bool LatchControlDependentOnPoison = false;
6510 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6511 const Instruction *Poison = PoisonStack.pop_back_val();
6512
6513 for (auto *PoisonUser : Poison->users()) {
6514 if (propagatesPoison(cast<Operator>(PoisonUser))) {
6515 if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6516 PoisonStack.push_back(cast<Instruction>(PoisonUser));
6517 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6518 assert(BI->isConditional() && "Only possibility!");
6519 if (BI->getParent() == LatchBB) {
6520 LatchControlDependentOnPoison = true;
6521 break;
6522 }
6523 }
6524 }
6525 }
6526
6527 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
6528 }
6529
6530 ScalarEvolution::LoopProperties
6531 ScalarEvolution::getLoopProperties(const Loop *L) {
6532 using LoopProperties = ScalarEvolution::LoopProperties;
6533
6534 auto Itr = LoopPropertiesCache.find(L);
6535 if (Itr == LoopPropertiesCache.end()) {
6536 auto HasSideEffects = [](Instruction *I) {
6537 if (auto *SI = dyn_cast<StoreInst>(I))
6538 return !SI->isSimple();
6539
6540 return I->mayHaveSideEffects();
6541 };
6542
6543 LoopProperties LP = {/* HasNoAbnormalExits */ true,
6544 /*HasNoSideEffects*/ true};
6545
6546 for (auto *BB : L->getBlocks())
6547 for (auto &I : *BB) {
6548 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6549 LP.HasNoAbnormalExits = false;
6550 if (HasSideEffects(&I))
6551 LP.HasNoSideEffects = false;
6552 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6553 break; // We're already as pessimistic as we can get.
6554 } 6555 6556 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6557 assert(InsertPair.second && "We just checked!"); 6558 Itr = InsertPair.first; 6559 } 6560 6561 return Itr->second; 6562 } 6563 6564 bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) { 6565 // A mustprogress loop without side effects must be finite. 6566 // TODO: The check used here is very conservative. It's only *specific* 6567 // side effects which are well defined in infinite loops. 6568 return isMustProgress(L) && loopHasNoSideEffects(L); 6569 } 6570 6571 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6572 if (!isSCEVable(V->getType())) 6573 return getUnknown(V); 6574 6575 if (Instruction *I = dyn_cast<Instruction>(V)) { 6576 // Don't attempt to analyze instructions in blocks that aren't 6577 // reachable. Such instructions don't matter, and they aren't required 6578 // to obey basic rules for definitions dominating uses which this 6579 // analysis depends on. 6580 if (!DT.isReachableFromEntry(I->getParent())) 6581 return getUnknown(UndefValue::get(V->getType())); 6582 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6583 return getConstant(CI); 6584 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6585 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6586 else if (!isa<ConstantExpr>(V)) 6587 return getUnknown(V); 6588 6589 Operator *U = cast<Operator>(V); 6590 if (auto BO = MatchBinaryOp(U, DT)) { 6591 switch (BO->Opcode) { 6592 case Instruction::Add: { 6593 // The simple thing to do would be to just call getSCEV on both operands 6594 // and call getAddExpr with the result. However if we're looking at a 6595 // bunch of things all added together, this can be quite inefficient, 6596 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6597 // Instead, gather up all the operands and make a single getAddExpr call. 6598 // LLVM IR canonical form means we need only traverse the left operands. 6599 SmallVector<const SCEV *, 4> AddOps; 6600 do { 6601 if (BO->Op) { 6602 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6603 AddOps.push_back(OpSCEV); 6604 break; 6605 } 6606 6607 // If a NUW or NSW flag can be applied to the SCEV for this 6608 // addition, then compute the SCEV for this addition by itself 6609 // with a separate call to getAddExpr. We need to do that 6610 // instead of pushing the operands of the addition onto AddOps, 6611 // since the flags are only known to apply to this particular 6612 // addition - they may not apply to other additions that can be 6613 // formed with operands from AddOps. 
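// (Illustrative: given (%a +nsw %b) + %c, the <nsw> justifies
// getAddExpr(%a, %b, FlagNSW) for the inner add only; flattening %a, %b
// and %c into one AddOps list would wrongly extend the flag to the whole
// sum.)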
6614 const SCEV *RHS = getSCEV(BO->RHS); 6615 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6616 if (Flags != SCEV::FlagAnyWrap) { 6617 const SCEV *LHS = getSCEV(BO->LHS); 6618 if (BO->Opcode == Instruction::Sub) 6619 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6620 else 6621 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6622 break; 6623 } 6624 } 6625 6626 if (BO->Opcode == Instruction::Sub) 6627 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6628 else 6629 AddOps.push_back(getSCEV(BO->RHS)); 6630 6631 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6632 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6633 NewBO->Opcode != Instruction::Sub)) { 6634 AddOps.push_back(getSCEV(BO->LHS)); 6635 break; 6636 } 6637 BO = NewBO; 6638 } while (true); 6639 6640 return getAddExpr(AddOps); 6641 } 6642 6643 case Instruction::Mul: { 6644 SmallVector<const SCEV *, 4> MulOps; 6645 do { 6646 if (BO->Op) { 6647 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6648 MulOps.push_back(OpSCEV); 6649 break; 6650 } 6651 6652 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6653 if (Flags != SCEV::FlagAnyWrap) { 6654 MulOps.push_back( 6655 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6656 break; 6657 } 6658 } 6659 6660 MulOps.push_back(getSCEV(BO->RHS)); 6661 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6662 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6663 MulOps.push_back(getSCEV(BO->LHS)); 6664 break; 6665 } 6666 BO = NewBO; 6667 } while (true); 6668 6669 return getMulExpr(MulOps); 6670 } 6671 case Instruction::UDiv: 6672 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6673 case Instruction::URem: 6674 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6675 case Instruction::Sub: { 6676 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6677 if (BO->Op) 6678 Flags = getNoWrapFlagsFromUB(BO->Op); 6679 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6680 } 6681 case Instruction::And: 6682 // For an expression like x&255 that merely masks off the high bits, 6683 // use zext(trunc(x)) as the SCEV expression. 6684 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6685 if (CI->isZero()) 6686 return getSCEV(BO->RHS); 6687 if (CI->isMinusOne()) 6688 return getSCEV(BO->LHS); 6689 const APInt &A = CI->getValue(); 6690 6691 // Instcombine's ShrinkDemandedConstant may strip bits out of 6692 // constants, obscuring what would otherwise be a low-bits mask. 6693 // Use computeKnownBits to compute what ShrinkDemandedConstant 6694 // knew about to reconstruct a low-bits mask value. 6695 unsigned LZ = A.countLeadingZeros(); 6696 unsigned TZ = A.countTrailingZeros(); 6697 unsigned BitWidth = A.getBitWidth(); 6698 KnownBits Known(BitWidth); 6699 computeKnownBits(BO->LHS, Known, getDataLayout(), 6700 0, &AC, nullptr, &DT); 6701 6702 APInt EffectiveMask = 6703 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6704 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6705 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6706 const SCEV *LHS = getSCEV(BO->LHS); 6707 const SCEV *ShiftedLHS = nullptr; 6708 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6709 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6710 // For an expression like (x * 8) & 8, simplify the multiply. 
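// (Worked numbers, for illustration: with (x * 8) & 8 in i32, A = 8 gives
// LZ = 28 and TZ = 3; MulZeros = 3, GCD = 3, DivAmt = 1, NewMul folds to
// x, and the final SCEV is zext(trunc(x) to i1) * 8, i.e. (x & 1) * 8.)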
6711 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6712 unsigned GCD = std::min(MulZeros, TZ); 6713 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6714 SmallVector<const SCEV*, 4> MulOps; 6715 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6716 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6717 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6718 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6719 } 6720 } 6721 if (!ShiftedLHS) 6722 ShiftedLHS = getUDivExpr(LHS, MulCount); 6723 return getMulExpr( 6724 getZeroExtendExpr( 6725 getTruncateExpr(ShiftedLHS, 6726 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6727 BO->LHS->getType()), 6728 MulCount); 6729 } 6730 } 6731 break; 6732 6733 case Instruction::Or: 6734 // If the RHS of the Or is a constant, we may have something like: 6735 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6736 // optimizations will transparently handle this case. 6737 // 6738 // In order for this transformation to be safe, the LHS must be of the 6739 // form X*(2^n) and the Or constant must be less than 2^n. 6740 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6741 const SCEV *LHS = getSCEV(BO->LHS); 6742 const APInt &CIVal = CI->getValue(); 6743 if (GetMinTrailingZeros(LHS) >= 6744 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6745 // Build a plain add SCEV. 6746 return getAddExpr(LHS, getSCEV(CI), 6747 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6748 } 6749 } 6750 break; 6751 6752 case Instruction::Xor: 6753 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6754 // If the RHS of xor is -1, then this is a not operation. 6755 if (CI->isMinusOne()) 6756 return getNotSCEV(getSCEV(BO->LHS)); 6757 6758 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6759 // This is a variant of the check for xor with -1, and it handles 6760 // the case where instcombine has trimmed non-demanded bits out 6761 // of an xor with -1. 6762 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6763 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6764 if (LBO->getOpcode() == Instruction::And && 6765 LCI->getValue() == CI->getValue()) 6766 if (const SCEVZeroExtendExpr *Z = 6767 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6768 Type *UTy = BO->LHS->getType(); 6769 const SCEV *Z0 = Z->getOperand(); 6770 Type *Z0Ty = Z0->getType(); 6771 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6772 6773 // If C is a low-bits mask, the zero extend is serving to 6774 // mask off the high bits. Complement the operand and 6775 // re-apply the zext. 6776 if (CI->getValue().isMask(Z0TySize)) 6777 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6778 6779 // If C is a single bit, it may be in the sign-bit position 6780 // before the zero-extend. In this case, represent the xor 6781 // using an add, which is equivalent, and re-apply the zext. 6782 APInt Trunc = CI->getValue().trunc(Z0TySize); 6783 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6784 Trunc.isSignMask()) 6785 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6786 UTy); 6787 } 6788 } 6789 break; 6790 6791 case Instruction::Shl: 6792 // Turn shift left of a constant amount into a multiply. 6793 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6794 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6795 6796 // If the shift count is not less than the bitwidth, the result of 6797 // the shift is undefined. 
Don't try to analyze it, because the
6798 // resolution chosen here may differ from the resolution chosen in
6799 // other parts of the compiler.
6800 if (SA->getValue().uge(BitWidth))
6801 break;
6802
6803 // We can safely preserve the nuw flag in all cases. It's also safe to
6804 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
6805 // requires special handling. It can be preserved as long as we're not
6806 // left shifting by bitwidth - 1.
6807 auto Flags = SCEV::FlagAnyWrap;
6808 if (BO->Op) {
6809 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
6810 if ((MulFlags & SCEV::FlagNSW) &&
6811 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
6812 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
6813 if (MulFlags & SCEV::FlagNUW)
6814 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
6815 }
6816
6817 Constant *X = ConstantInt::get(
6818 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6819 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6820 }
6821 break;
6822
6823 case Instruction::AShr: {
6824 // AShr X, C, where C is a constant.
6825 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6826 if (!CI)
6827 break;
6828
6829 Type *OuterTy = BO->LHS->getType();
6830 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6831 // If the shift count is not less than the bitwidth, the result of
6832 // the shift is undefined. Don't try to analyze it, because the
6833 // resolution chosen here may differ from the resolution chosen in
6834 // other parts of the compiler.
6835 if (CI->getValue().uge(BitWidth))
6836 break;
6837
6838 if (CI->isZero())
6839 return getSCEV(BO->LHS); // shift by zero --> noop
6840
6841 uint64_t AShrAmt = CI->getZExtValue();
6842 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6843
6844 Operator *L = dyn_cast<Operator>(BO->LHS);
6845 if (L && L->getOpcode() == Instruction::Shl) {
6846 // X = Shl A, n
6847 // Y = AShr X, m
6848 // Both n and m are constant.
6849
6850 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6851 if (L->getOperand(1) == BO->RHS)
6852 // For a two-shift sext-inreg, i.e. n = m,
6853 // use sext(trunc(x)) as the SCEV expression.
6854 return getSignExtendExpr(
6855 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
6856
6857 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
6858 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
6859 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
6860 if (ShlAmt > AShrAmt) {
6861 // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
6862 // expression. We already checked that ShlAmt < BitWidth, so
6863 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
6864 // ShlAmt - AShrAmt < BitWidth - AShrAmt, the width of TruncTy.
6865 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
6866 ShlAmt - AShrAmt);
6867 return getSignExtendExpr(
6868 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
6869 getConstant(Mul)), OuterTy);
6870 }
6871 }
6872 }
6873 break;
6874 }
6875 }
6876 }
6877
6878 switch (U->getOpcode()) {
6879 case Instruction::Trunc:
6880 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
6881
6882 case Instruction::ZExt:
6883 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6884
6885 case Instruction::SExt:
6886 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
6887 // The NSW flag of a subtract does not always survive the conversion to
6888 // A + (-1)*B. By pushing sign extension onto its operands we are much
6889 // more likely to preserve NSW and allow later AddRec optimisations.
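// (Illustrative: sext i32 (%a -nsw %b) to i64 becomes
// (sext %a) -nsw (sext %b) here, rather than sext(%a + (-1) * %b) with
// the <nsw> flag dropped.)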
6890 //
6891 // NOTE: This is effectively duplicating this logic from getSignExtend:
6892 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
6893 // but by that point the NSW information has potentially been lost.
6894 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
6895 Type *Ty = U->getType();
6896 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
6897 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
6898 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
6899 }
6900 }
6901 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
6902
6903 case Instruction::BitCast:
6904 // BitCasts are no-op casts so we just eliminate the cast.
6905 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
6906 return getSCEV(U->getOperand(0));
6907 break;
6908
6909 case Instruction::PtrToInt: {
6910 // A pointer-to-integer cast is straightforward, so do model it.
6911 const SCEV *Op = getSCEV(U->getOperand(0));
6912 Type *DstIntTy = U->getType();
6913 // But only if the effective SCEV (integer) type is wide enough to
6914 // represent all possible pointer values.
6915 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
6916 if (isa<SCEVCouldNotCompute>(IntOp))
6917 return getUnknown(V);
6918 return IntOp;
6919 }
6920 case Instruction::IntToPtr:
6921 // Just don't deal with inttoptr casts.
6922 return getUnknown(V);
6923
6924 case Instruction::SDiv:
6925 // If both operands are non-negative, this is just a udiv.
6926 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
6927 isKnownNonNegative(getSCEV(U->getOperand(1))))
6928 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
6929 break;
6930
6931 case Instruction::SRem:
6932 // If both operands are non-negative, this is just a urem.
6933 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
6934 isKnownNonNegative(getSCEV(U->getOperand(1))))
6935 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
6936 break;
6937
6938 case Instruction::GetElementPtr:
6939 return createNodeForGEP(cast<GEPOperator>(U));
6940
6941 case Instruction::PHI:
6942 return createNodeForPHI(cast<PHINode>(U));
6943
6944 case Instruction::Select:
6945 // U can also be a select constant expr, which we let fall through. Since
6946 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
6947 // constant expressions cannot have instructions as operands, we'd have
6948 // returned getUnknown for a select constant expression anyway.
6949 if (isa<Instruction>(U))
6950 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
6951 U->getOperand(1), U->getOperand(2));
6952 break;
6953
6954 case Instruction::Call:
6955 case Instruction::Invoke:
6956 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
6957 return getSCEV(RV);
6958
6959 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
6960 switch (II->getIntrinsicID()) {
6961 case Intrinsic::abs:
6962 return getAbsExpr(
6963 getSCEV(II->getArgOperand(0)),
6964 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
6965 case Intrinsic::umax:
6966 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
6967 getSCEV(II->getArgOperand(1)));
6968 case Intrinsic::umin:
6969 return getUMinExpr(getSCEV(II->getArgOperand(0)),
6970 getSCEV(II->getArgOperand(1)));
6971 case Intrinsic::smax:
6972 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
6973 getSCEV(II->getArgOperand(1)));
6974 case Intrinsic::smin:
6975 return getSMinExpr(getSCEV(II->getArgOperand(0)),
6976 getSCEV(II->getArgOperand(1)));
6977 case Intrinsic::usub_sat: {
6978 const SCEV *X = getSCEV(II->getArgOperand(0));
6979 const SCEV *Y = getSCEV(II->getArgOperand(1));
6980 const SCEV *ClampedY = getUMinExpr(X, Y);
6981 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
6982 }
6983 case Intrinsic::uadd_sat: {
6984 const SCEV *X = getSCEV(II->getArgOperand(0));
6985 const SCEV *Y = getSCEV(II->getArgOperand(1));
6986 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
6987 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
6988 }
6989 case Intrinsic::start_loop_iterations:
6990 // A start_loop_iterations is just equivalent to the first operand for
6991 // SCEV purposes.
6992 return getSCEV(II->getArgOperand(0));
6993 default:
6994 break;
6995 }
6996 }
6997 break;
6998 }
6999
7000 return getUnknown(V);
7001 }
7002
7003 //===----------------------------------------------------------------------===//
7004 // Iteration Count Computation Code
7005 //
7006
7007 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
7008 // Get the trip count from the BE count by adding 1. Overflow results
7009 // in zero, which means "unknown".
7010 return getAddExpr(ExitCount, getOne(ExitCount->getType()));
7011 }
7012
7013 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7014 if (!ExitCount)
7015 return 0;
7016
7017 ConstantInt *ExitConst = ExitCount->getValue();
7018
7019 // Guard against huge trip counts.
7020 if (ExitConst->getValue().getActiveBits() > 32)
7021 return 0;
7022
7023 // In case of integer overflow, this returns 0, which is correct.
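// (For example: a backedge-taken count of 0xFFFFFFFF passes the guard
// above, and (unsigned)0xFFFFFFFF + 1 wraps to 0, i.e. "unknown".)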
7024 return ((unsigned)ExitConst->getZExtValue()) + 1;
7025 }
7026
7027 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7028 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7029 return getConstantTripCount(ExitCount);
7030 }
7031
7032 unsigned
7033 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7034 const BasicBlock *ExitingBlock) {
7035 assert(ExitingBlock && "Must pass a non-null exiting block!");
7036 assert(L->isLoopExiting(ExitingBlock) &&
7037 "Exiting block must actually branch out of the loop!");
7038 const SCEVConstant *ExitCount =
7039 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7040 return getConstantTripCount(ExitCount);
7041 }
7042
7043 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7044 const auto *MaxExitCount =
7045 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7046 return getConstantTripCount(MaxExitCount);
7047 }
7048
7049 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7050 SmallVector<BasicBlock *, 8> ExitingBlocks;
7051 L->getExitingBlocks(ExitingBlocks);
7052
7053 Optional<unsigned> Res = None;
7054 for (auto *ExitingBB : ExitingBlocks) {
7055 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7056 if (!Res)
7057 Res = Multiple;
7058 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7059 }
7060 return Res.getValueOr(1);
7061 }
7062
7063 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7064 const SCEV *ExitCount) {
7065 if (ExitCount == getCouldNotCompute())
7066 return 1;
7067
7068 // Get the trip count.
7069 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7070
7071 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7072 if (!TC)
7073 // Attempt to factor more general cases. Returns the greatest power-of-two
7074 // divisor. If overflow happens, the trip count expression is still
7075 // divisible by the greatest power-of-two divisor returned.
7076 return 1U << std::min((uint32_t)31,
7077 GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
7078
7079 ConstantInt *Result = TC->getValue();
7080
7081 // Guard against huge trip counts (this requires checking
7082 // for zero to handle the case where the trip count == -1 and the
7083 // addition wraps).
7084 if (!Result || Result->getValue().getActiveBits() > 32 ||
7085 Result->getValue().getActiveBits() == 0)
7086 return 1;
7087
7088 return (unsigned)Result->getZExtValue();
7089 }
7090
7091 /// Returns the largest constant divisor of the trip count of this loop as a
7092 /// normal unsigned value, if possible. This means that the actual trip count is
7093 /// always a multiple of the returned value (don't forget the trip count could
7094 /// very well be zero as well!).
7095 ///
7096 /// Returns 1 if the trip count is unknown or not guaranteed to be the
7097 /// multiple of a constant (which is also the case if the trip count is simply
7098 /// constant; use getSmallConstantTripCount for that case). It will also return
7099 /// 1 if the trip count is very large (>= 2^32).
7100 ///
7101 /// As explained in the comments for getSmallConstantTripCount, this assumes
7102 /// that control exits the loop via ExitingBlock.
7103 unsigned
7104 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7105 const BasicBlock *ExitingBlock) {
7106 assert(ExitingBlock && "Must pass a non-null exiting block!");
7107 assert(L->isLoopExiting(ExitingBlock) &&
7108 "Exiting block must actually branch out of the loop!");
7109 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7110 return getSmallConstantTripMultiple(L, ExitCount);
7111 }
7112
7113 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7114 const BasicBlock *ExitingBlock,
7115 ExitCountKind Kind) {
7116 switch (Kind) {
7117 case Exact:
7118 case SymbolicMaximum:
7119 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7120 case ConstantMaximum:
7121 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7122 }
7123 llvm_unreachable("Invalid ExitCountKind!");
7124 }
7125
7126 const SCEV *
7127 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7128 SCEVUnionPredicate &Preds) {
7129 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7130 }
7131
7132 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7133 ExitCountKind Kind) {
7134 switch (Kind) {
7135 case Exact:
7136 return getBackedgeTakenInfo(L).getExact(L, this);
7137 case ConstantMaximum:
7138 return getBackedgeTakenInfo(L).getConstantMax(this);
7139 case SymbolicMaximum:
7140 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7141 }
7142 llvm_unreachable("Invalid ExitCountKind!");
7143 }
7144
7145 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7146 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7147 }
7148
7149 /// Push PHI nodes in the header of the given loop onto the given Worklist.
7150 static void
7151 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
7152 BasicBlock *Header = L->getHeader();
7153
7154 // Push all Loop-header PHIs onto the Worklist stack.
7155 for (PHINode &PN : Header->phis())
7156 Worklist.push_back(&PN);
7157 }
7158
7159 const ScalarEvolution::BackedgeTakenInfo &
7160 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7161 auto &BTI = getBackedgeTakenInfo(L);
7162 if (BTI.hasFullInfo())
7163 return BTI;
7164
7165 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7166
7167 if (!Pair.second)
7168 return Pair.first->second;
7169
7170 BackedgeTakenInfo Result =
7171 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7172
7173 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7174 }
7175
7176 ScalarEvolution::BackedgeTakenInfo &
7177 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7178 // Initially insert an invalid entry for this loop. If the insertion
7179 // succeeds, proceed to actually compute a backedge-taken count and
7180 // update the value. The temporary CouldNotCompute value tells SCEV
7181 // code elsewhere that it shouldn't attempt to request a new
7182 // backedge-taken count, which could result in infinite recursion.
7183 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7184 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7185 if (!Pair.second)
7186 return Pair.first->second;
7187
7188 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7189 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7190 // must be cleared in this scope.
7191 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7192
7193 // In product builds the statistics are unused; the casts below silence
7193 // unused-variable warnings.
7194 (void)NumTripCountsComputed;
7195 (void)NumTripCountsNotComputed;
7196 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
7197 const SCEV *BEExact = Result.getExact(L, this);
7198 if (BEExact != getCouldNotCompute()) {
7199 assert(isLoopInvariant(BEExact, L) &&
7200 isLoopInvariant(Result.getConstantMax(this), L) &&
7201 "Computed backedge-taken count isn't loop invariant for loop!");
7202 ++NumTripCountsComputed;
7203 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7204 isa<PHINode>(L->getHeader()->begin())) {
7205 // Only count loops that have phi nodes as not being computable.
7206 ++NumTripCountsNotComputed;
7207 }
7208 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7209
7210 // Now that we know more about the trip count for this loop, forget any
7211 // existing SCEV values for PHI nodes in this loop since they are only
7212 // conservative estimates made without the benefit of trip count
7213 // information. This is similar to the code in forgetLoop, except that
7214 // it handles SCEVUnknown PHI nodes specially.
7215 if (Result.hasAnyInfo()) {
7216 SmallVector<Instruction *, 16> Worklist;
7217 PushLoopPHIs(L, Worklist);
7218
7219 SmallPtrSet<Instruction *, 8> Discovered;
7220 while (!Worklist.empty()) {
7221 Instruction *I = Worklist.pop_back_val();
7222
7223 ValueExprMapType::iterator It =
7224 ValueExprMap.find_as(static_cast<Value *>(I));
7225 if (It != ValueExprMap.end()) {
7226 const SCEV *Old = It->second;
7227
7228 // SCEVUnknown for a PHI either means that it has an unrecognized
7229 // structure, or it's a PHI that's in the process of being computed
7230 // by createNodeForPHI. In the former case, additional loop trip
7231 // count information isn't going to change anything. In the latter
7232 // case, createNodeForPHI will perform the necessary updates on its
7233 // own when it gets to that point.
7234 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7235 eraseValueFromMap(It->first);
7236 forgetMemoizedResults(Old);
7237 }
7238 if (PHINode *PN = dyn_cast<PHINode>(I))
7239 ConstantEvolutionLoopExitValue.erase(PN);
7240 }
7241
7242 // Since we don't need to invalidate anything for correctness and we're
7243 // only invalidating to make SCEV's results more precise, we get to stop
7244 // early to avoid invalidating too much. This is especially important in
7245 // cases like:
7246 //
7247 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7248 // loop0:
7249 // %pn0 = phi
7250 // ...
7251 // loop1:
7252 // %pn1 = phi
7253 // ...
7254 //
7255 // where both loop0's and loop1's backedge taken counts use the SCEV
7256 // expression for %v. If we don't have the early stop below then in cases
7257 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7258 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7259 // count for loop1, effectively nullifying SCEV's trip count cache.
7260 for (auto *U : I->users())
7261 if (auto *I = dyn_cast<Instruction>(U)) {
7262 auto *LoopForUser = LI.getLoopFor(I->getParent());
7263 if (LoopForUser && L->contains(LoopForUser) &&
7264 Discovered.insert(I).second)
7265 Worklist.push_back(I);
7266 }
7267 }
7268 }
7269
7270 // Re-lookup the insert position, since the call to
7271 // computeBackedgeTakenCount above could result in a
7272 // recursive call to getBackedgeTakenInfo (on a different
7273 // loop), which would invalidate the iterator computed
7274 // earlier.
7275 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7276 } 7277 7278 void ScalarEvolution::forgetAllLoops() { 7279 // This method is intended to forget all info about loops. It should 7280 // invalidate caches as if the following happened: 7281 // - The trip counts of all loops have changed arbitrarily 7282 // - Every llvm::Value has been updated in place to produce a different 7283 // result. 7284 BackedgeTakenCounts.clear(); 7285 PredicatedBackedgeTakenCounts.clear(); 7286 LoopPropertiesCache.clear(); 7287 ConstantEvolutionLoopExitValue.clear(); 7288 ValueExprMap.clear(); 7289 ValuesAtScopes.clear(); 7290 LoopDispositions.clear(); 7291 BlockDispositions.clear(); 7292 UnsignedRanges.clear(); 7293 SignedRanges.clear(); 7294 ExprValueMap.clear(); 7295 HasRecMap.clear(); 7296 MinTrailingZerosCache.clear(); 7297 PredicatedSCEVRewrites.clear(); 7298 } 7299 7300 void ScalarEvolution::forgetLoop(const Loop *L) { 7301 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7302 SmallVector<Instruction *, 32> Worklist; 7303 SmallPtrSet<Instruction *, 16> Visited; 7304 7305 // Iterate over all the loops and sub-loops to drop SCEV information. 7306 while (!LoopWorklist.empty()) { 7307 auto *CurrL = LoopWorklist.pop_back_val(); 7308 7309 // Drop any stored trip count value. 7310 BackedgeTakenCounts.erase(CurrL); 7311 PredicatedBackedgeTakenCounts.erase(CurrL); 7312 7313 // Drop information about predicated SCEV rewrites for this loop. 7314 for (auto I = PredicatedSCEVRewrites.begin(); 7315 I != PredicatedSCEVRewrites.end();) { 7316 std::pair<const SCEV *, const Loop *> Entry = I->first; 7317 if (Entry.second == CurrL) 7318 PredicatedSCEVRewrites.erase(I++); 7319 else 7320 ++I; 7321 } 7322 7323 auto LoopUsersItr = LoopUsers.find(CurrL); 7324 if (LoopUsersItr != LoopUsers.end()) { 7325 for (auto *S : LoopUsersItr->second) 7326 forgetMemoizedResults(S); 7327 LoopUsers.erase(LoopUsersItr); 7328 } 7329 7330 // Drop information about expressions based on loop-header PHIs. 7331 PushLoopPHIs(CurrL, Worklist); 7332 7333 while (!Worklist.empty()) { 7334 Instruction *I = Worklist.pop_back_val(); 7335 if (!Visited.insert(I).second) 7336 continue; 7337 7338 ValueExprMapType::iterator It = 7339 ValueExprMap.find_as(static_cast<Value *>(I)); 7340 if (It != ValueExprMap.end()) { 7341 eraseValueFromMap(It->first); 7342 forgetMemoizedResults(It->second); 7343 if (PHINode *PN = dyn_cast<PHINode>(I)) 7344 ConstantEvolutionLoopExitValue.erase(PN); 7345 } 7346 7347 PushDefUseChildren(I, Worklist); 7348 } 7349 7350 LoopPropertiesCache.erase(CurrL); 7351 // Forget all contained loops too, to avoid dangling entries in the 7352 // ValuesAtScopes map. 7353 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7354 } 7355 } 7356 7357 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7358 while (Loop *Parent = L->getParentLoop()) 7359 L = Parent; 7360 forgetLoop(L); 7361 } 7362 7363 void ScalarEvolution::forgetValue(Value *V) { 7364 Instruction *I = dyn_cast<Instruction>(V); 7365 if (!I) return; 7366 7367 // Drop information about expressions based on loop-header PHIs. 
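// (Illustrative: forgetting %iv also walks its def-use chain below and
// drops cached SCEVs for dependent values such as %iv.next = add %iv, 1.)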
7368   SmallVector<Instruction *, 16> Worklist;
7369   Worklist.push_back(I);
7370
7371   SmallPtrSet<Instruction *, 8> Visited;
7372   while (!Worklist.empty()) {
7373     I = Worklist.pop_back_val();
7374     if (!Visited.insert(I).second)
7375       continue;
7376
7377     ValueExprMapType::iterator It =
7378         ValueExprMap.find_as(static_cast<Value *>(I));
7379     if (It != ValueExprMap.end()) {
7380       eraseValueFromMap(It->first);
7381       forgetMemoizedResults(It->second);
7382       if (PHINode *PN = dyn_cast<PHINode>(I))
7383         ConstantEvolutionLoopExitValue.erase(PN);
7384     }
7385
7386     PushDefUseChildren(I, Worklist);
7387   }
7388 }
7389
7390 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7391   LoopDispositions.clear();
7392 }
7393
7394 /// Get the exact loop backedge taken count considering all loop exits. A
7395 /// computable result can only be returned for loops with all exiting blocks
7396 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7397 /// is never skipped. This is a valid assumption as long as the loop exits via
7398 /// that test. For precise results, it is the caller's responsibility to specify
7399 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
7400 const SCEV *
7401 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7402                                              SCEVUnionPredicate *Preds) const {
7403   // If any exits were not computable, the loop is not computable.
7404   if (!isComplete() || ExitNotTaken.empty())
7405     return SE->getCouldNotCompute();
7406
7407   const BasicBlock *Latch = L->getLoopLatch();
7408   // All exiting blocks we have collected must dominate the only backedge.
7409   if (!Latch)
7410     return SE->getCouldNotCompute();
7411
7412   // All exiting blocks we have gathered dominate the loop's latch, so the
7413   // exact trip count is simply the minimum of all the calculated exit counts.
7414   SmallVector<const SCEV *, 2> Ops;
7415   for (auto &ENT : ExitNotTaken) {
7416     const SCEV *BECount = ENT.ExactNotTaken;
7417     assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7418     assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7419            "We should only have known counts for exiting blocks that dominate "
7420            "latch!");
7421
7422     Ops.push_back(BECount);
7423
7424     if (Preds && !ENT.hasAlwaysTruePredicate())
7425       Preds->add(ENT.Predicate.get());
7426
7427     assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7428            "Predicate should be always true!");
7429   }
7430
7431   return SE->getUMinFromMismatchedTypes(Ops);
7432 }
7433
7434 /// Get the exact not-taken count for this loop exit.
7435 const SCEV *
7436 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7437                                              ScalarEvolution *SE) const {
7438   for (auto &ENT : ExitNotTaken)
7439     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7440       return ENT.ExactNotTaken;
7441
7442   return SE->getCouldNotCompute();
7443 }
7444
7445 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7446     const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7447   for (auto &ENT : ExitNotTaken)
7448     if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7449       return ENT.MaxNotTaken;
7450
7451   return SE->getCouldNotCompute();
7452 }
7453
7454 /// getConstantMax - Get the constant max backedge taken count for the loop.
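/// For illustration (an assumed example, not from the original docs): if the
/// exact backedge-taken count is the unknown i8 value %n, getExact() yields
/// no constant, yet the unsigned range of %n still bounds the count, so the
/// constant max may be the i8 constant 255. Callers that need a compile-time
/// bound can therefore fall back on this when getExact() is not a constant.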
7455 const SCEV * 7456 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 7457 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7458 return !ENT.hasAlwaysTruePredicate(); 7459 }; 7460 7461 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 7462 return SE->getCouldNotCompute(); 7463 7464 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 7465 isa<SCEVConstant>(getConstantMax())) && 7466 "No point in having a non-constant max backedge taken count!"); 7467 return getConstantMax(); 7468 } 7469 7470 const SCEV * 7471 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 7472 ScalarEvolution *SE) { 7473 if (!SymbolicMax) 7474 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 7475 return SymbolicMax; 7476 } 7477 7478 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 7479 ScalarEvolution *SE) const { 7480 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7481 return !ENT.hasAlwaysTruePredicate(); 7482 }; 7483 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7484 } 7485 7486 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const { 7487 return Operands.contains(S); 7488 } 7489 7490 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7491 : ExactNotTaken(E), MaxNotTaken(E) { 7492 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7493 isa<SCEVConstant>(MaxNotTaken)) && 7494 "No point in having a non-constant max backedge taken count!"); 7495 } 7496 7497 ScalarEvolution::ExitLimit::ExitLimit( 7498 const SCEV *E, const SCEV *M, bool MaxOrZero, 7499 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7500 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7501 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7502 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7503 "Exact is not allowed to be less precise than Max"); 7504 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7505 isa<SCEVConstant>(MaxNotTaken)) && 7506 "No point in having a non-constant max backedge taken count!"); 7507 for (auto *PredSet : PredSetList) 7508 for (auto *P : *PredSet) 7509 addPredicate(P); 7510 } 7511 7512 ScalarEvolution::ExitLimit::ExitLimit( 7513 const SCEV *E, const SCEV *M, bool MaxOrZero, 7514 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7515 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7516 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7517 isa<SCEVConstant>(MaxNotTaken)) && 7518 "No point in having a non-constant max backedge taken count!"); 7519 } 7520 7521 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7522 bool MaxOrZero) 7523 : ExitLimit(E, M, MaxOrZero, None) { 7524 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7525 isa<SCEVConstant>(MaxNotTaken)) && 7526 "No point in having a non-constant max backedge taken count!"); 7527 } 7528 7529 class SCEVRecordOperands { 7530 SmallPtrSetImpl<const SCEV *> &Operands; 7531 7532 public: 7533 SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands) 7534 : Operands(Operands) {} 7535 bool follow(const SCEV *S) { 7536 Operands.insert(S); 7537 return true; 7538 } 7539 bool isDone() { return false; } 7540 }; 7541 7542 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7543 /// computable exit into a persistent ExitNotTakenInfo array. 
7544 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7545     ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7546     bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7547     : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7548   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7549
7550   ExitNotTaken.reserve(ExitCounts.size());
7551   std::transform(
7552       ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7553       [&](const EdgeExitInfo &EEI) {
7554         BasicBlock *ExitBB = EEI.first;
7555         const ExitLimit &EL = EEI.second;
7556         if (EL.Predicates.empty())
7557           return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7558                                   nullptr);
7559
7560         std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7561         for (auto *Pred : EL.Predicates)
7562           Predicate->add(Pred);
7563
7564         return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7565                                 std::move(Predicate));
7566       });
7567   assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7568           isa<SCEVConstant>(ConstantMax)) &&
7569          "No point in having a non-constant max backedge taken count!");
7570
7571   SCEVRecordOperands RecordOperands(Operands);
7572   SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
7573   if (!isa<SCEVCouldNotCompute>(ConstantMax))
7574     ST.visitAll(ConstantMax);
7575   for (auto &ENT : ExitNotTaken)
7576     if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
7577       ST.visitAll(ENT.ExactNotTaken);
7578 }
7579
7580 /// Compute the number of times the backedge of the specified loop will execute.
7581 ScalarEvolution::BackedgeTakenInfo
7582 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7583                                            bool AllowPredicates) {
7584   SmallVector<BasicBlock *, 8> ExitingBlocks;
7585   L->getExitingBlocks(ExitingBlocks);
7586
7587   using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7588
7589   SmallVector<EdgeExitInfo, 4> ExitCounts;
7590   bool CouldComputeBECount = true;
7591   BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7592   const SCEV *MustExitMaxBECount = nullptr;
7593   const SCEV *MayExitMaxBECount = nullptr;
7594   bool MustExitMaxOrZero = false;
7595
7596   // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7597   // and compute maxBECount.
7598   // Do a union of all the predicates here.
7599   for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7600     BasicBlock *ExitBB = ExitingBlocks[i];
7601
7602     // We canonicalize untaken exits to br (constant), and ignore them so that
7603     // proving an exit untaken doesn't negatively impact our ability to reason
7604     // about the loop as a whole.
7605     if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7606       if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7607         bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7608         if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7609           continue;
7610       }
7611
7612     ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7613
7614     assert((AllowPredicates || EL.Predicates.empty()) &&
7615            "Predicated exit limit when predicates are not allowed!");
7616
7617     // 1. For each exit that can be computed, add an entry to ExitCounts.
7618     // CouldComputeBECount is true only if all exits can be computed.
7619     if (EL.ExactNotTaken == getCouldNotCompute())
7620       // We couldn't compute an exact value for this exit, so
7621       // we won't be able to compute an exact value for the loop.
7622       CouldComputeBECount = false;
7623     else
7624       ExitCounts.emplace_back(ExitBB, EL);
7625
7626     // 2. Derive the loop's MaxBECount from each exit's max number of
7627     // non-exiting iterations. Partition the loop exits into two kinds:
7628     // LoopMustExits and LoopMayExits.
7629     //
7630     // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
7631     // it is a LoopMayExit. If any computable LoopMustExit is found, then
7632     // MaxBECount is the minimum EL.MaxNotTaken of computable
7633     // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7634     // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7635     // computable EL.MaxNotTaken.
7636     if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7637         DT.dominates(ExitBB, Latch)) {
7638       if (!MustExitMaxBECount) {
7639         MustExitMaxBECount = EL.MaxNotTaken;
7640         MustExitMaxOrZero = EL.MaxOrZero;
7641       } else {
7642         MustExitMaxBECount =
7643             getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7644       }
7645     } else if (MayExitMaxBECount != getCouldNotCompute()) {
7646       if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7647         MayExitMaxBECount = EL.MaxNotTaken;
7648       else {
7649         MayExitMaxBECount =
7650             getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7651       }
7652     }
7653   }
7654   const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7655     (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7656   // The loop backedge will be taken the maximum or zero times if there's
7657   // a single exit that must be taken the maximum or zero times.
7658   bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7659   return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7660                            MaxBECount, MaxOrZero);
7661 }
7662
7663 ScalarEvolution::ExitLimit
7664 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7665                                   bool AllowPredicates) {
7666   assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7667   // If our exiting block does not dominate the latch, then its connection with
7668   // the loop's exit limit may be far from trivial.
7669   const BasicBlock *Latch = L->getLoopLatch();
7670   if (!Latch || !DT.dominates(ExitingBlock, Latch))
7671     return getCouldNotCompute();
7672
7673   bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7674   Instruction *Term = ExitingBlock->getTerminator();
7675   if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7676     assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7677     bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7678     assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7679            "It should have one successor in loop and one exit block!");
7680     // Proceed to the next level to examine the exit condition expression.
7681     return computeExitLimitFromCond(
7682         L, BI->getCondition(), ExitIfTrue,
7683         /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7684   }
7685
7686   if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7687     // For switch, make sure that there is a single exit from the loop.
7688     BasicBlock *Exit = nullptr;
7689     for (auto *SBB : successors(ExitingBlock))
7690       if (!L->contains(SBB)) {
7691         if (Exit) // Multiple exit successors.
7692 return getCouldNotCompute(); 7693 Exit = SBB; 7694 } 7695 assert(Exit && "Exiting block must have at least one exit"); 7696 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7697 /*ControlsExit=*/IsOnlyExit); 7698 } 7699 7700 return getCouldNotCompute(); 7701 } 7702 7703 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7704 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7705 bool ControlsExit, bool AllowPredicates) { 7706 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7707 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7708 ControlsExit, AllowPredicates); 7709 } 7710 7711 Optional<ScalarEvolution::ExitLimit> 7712 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7713 bool ExitIfTrue, bool ControlsExit, 7714 bool AllowPredicates) { 7715 (void)this->L; 7716 (void)this->ExitIfTrue; 7717 (void)this->AllowPredicates; 7718 7719 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7720 this->AllowPredicates == AllowPredicates && 7721 "Variance in assumed invariant key components!"); 7722 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7723 if (Itr == TripCountMap.end()) 7724 return None; 7725 return Itr->second; 7726 } 7727 7728 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7729 bool ExitIfTrue, 7730 bool ControlsExit, 7731 bool AllowPredicates, 7732 const ExitLimit &EL) { 7733 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7734 this->AllowPredicates == AllowPredicates && 7735 "Variance in assumed invariant key components!"); 7736 7737 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7738 assert(InsertResult.second && "Expected successful insertion!"); 7739 (void)InsertResult; 7740 (void)ExitIfTrue; 7741 } 7742 7743 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7744 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7745 bool ControlsExit, bool AllowPredicates) { 7746 7747 if (auto MaybeEL = 7748 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7749 return *MaybeEL; 7750 7751 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7752 ControlsExit, AllowPredicates); 7753 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7754 return EL; 7755 } 7756 7757 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7758 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7759 bool ControlsExit, bool AllowPredicates) { 7760 // Handle BinOp conditions (And, Or). 7761 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 7762 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7763 return *LimitFromBinOp; 7764 7765 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7766 // Proceed to the next level to examine the icmp. 7767 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7768 ExitLimit EL = 7769 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7770 if (EL.hasFullInfo() || !AllowPredicates) 7771 return EL; 7772 7773 // Try again, but use SCEV predicates this time. 7774 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7775 /*AllowPredicates=*/true); 7776 } 7777 7778 // Check for a constant condition. 
These are normally stripped out by
7779   // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
7780   // preserve the CFG and is temporarily leaving constant conditions
7781   // in place.
7782   if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7783     if (ExitIfTrue == !CI->getZExtValue())
7784       // The backedge is always taken.
7785       return getCouldNotCompute();
7786     else
7787       // The backedge is never taken.
7788       return getZero(CI->getType());
7789   }
7790
7791   // If it's not an integer or pointer comparison then compute it the hard way.
7792   return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7793 }
7794
7795 Optional<ScalarEvolution::ExitLimit>
7796 ScalarEvolution::computeExitLimitFromCondFromBinOp(
7797     ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7798     bool ControlsExit, bool AllowPredicates) {
7799   // Check if the controlling expression for this loop is an And or Or.
7800   Value *Op0, *Op1;
7801   bool IsAnd = false;
7802   if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
7803     IsAnd = true;
7804   else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
7805     IsAnd = false;
7806   else
7807     return None;
7808
7809   // EitherMayExit is true in these two cases:
7810   //   br (and Op0 Op1), loop, exit
7811   //   br (or Op0 Op1), exit, loop
7812   bool EitherMayExit = IsAnd ^ ExitIfTrue;
7813   ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
7814                                                  ControlsExit && !EitherMayExit,
7815                                                  AllowPredicates);
7816   ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
7817                                                  ControlsExit && !EitherMayExit,
7818                                                  AllowPredicates);
7819
7820   // Be robust against unsimplified IR for the form "op i1 X, NeutralElement".
7821   const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
7822   if (isa<ConstantInt>(Op1))
7823     return Op1 == NeutralElement ? EL0 : EL1;
7824   if (isa<ConstantInt>(Op0))
7825     return Op0 == NeutralElement ? EL1 : EL0;
7826
7827   const SCEV *BECount = getCouldNotCompute();
7828   const SCEV *MaxBECount = getCouldNotCompute();
7829   if (EitherMayExit) {
7830     // Both conditions must be true for the loop to continue executing.
7831     // Choose the less conservative count.
7832     // If ExitCond is a short-circuit form (select), using
7833     // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
7834     // To see the detailed examples, please see
7835     // test/Analysis/ScalarEvolution/exit-count-select.ll
7836     bool PoisonSafe = isa<BinaryOperator>(ExitCond);
7837     if (!PoisonSafe)
7838       // Even if ExitCond is select, we can safely derive BECount using both
7839       // EL0 and EL1 in these cases:
7840       // (1) EL0.ExactNotTaken is non-zero
7841       // (2) EL1.ExactNotTaken is non-poison
7842       // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
7843       //     it cannot be umin(0, ..))
7844       // The PoisonSafe assignment below is simplified and the assertion after
7845       // BECount calculation fully guarantees the condition (3).
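      // An illustrative sketch of the hazard (an assumed example; the
      // referenced test file has the authoritative ones): with a latch
      // condition
      //   %cond = select i1 %c0, i1 %c1, i1 false   ; short-circuit 'and'
      // %c1 is never evaluated on an iteration where %c0 is false, so an exit
      // count derived from %c1 alone may be based on poison, and blindly
      // taking umin(EL0.ExactNotTaken, EL1.ExactNotTaken) could use that
      // poisoned count. The SCEVConstant checks just below sidestep this.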
7846       PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
7847                    isa<SCEVConstant>(EL1.ExactNotTaken);
7848     if (EL0.ExactNotTaken != getCouldNotCompute() &&
7849         EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
7850       BECount =
7851           getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7852
7853       // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
7854       // it should have been simplified to zero (see the condition (3) above).
7855       assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
7856              BECount->isZero());
7857     }
7858     if (EL0.MaxNotTaken == getCouldNotCompute())
7859       MaxBECount = EL1.MaxNotTaken;
7860     else if (EL1.MaxNotTaken == getCouldNotCompute())
7861       MaxBECount = EL0.MaxNotTaken;
7862     else
7863       MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
7864   } else {
7865     // Both conditions must be true at the same time for the loop to exit.
7866     // For now, be conservative.
7867     if (EL0.ExactNotTaken == EL1.ExactNotTaken)
7868       BECount = EL0.ExactNotTaken;
7869   }
7870
7871   // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
7872   // to be more aggressive when computing BECount than when computing
7873   // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
7874   // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
7875   // to not.
7876   if (isa<SCEVCouldNotCompute>(MaxBECount) &&
7877       !isa<SCEVCouldNotCompute>(BECount))
7878     MaxBECount = getConstant(getUnsignedRangeMax(BECount));
7879
7880   return ExitLimit(BECount, MaxBECount, false,
7881                    { &EL0.Predicates, &EL1.Predicates });
7882 }
7883
7884 ScalarEvolution::ExitLimit
7885 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
7886                                           ICmpInst *ExitCond,
7887                                           bool ExitIfTrue,
7888                                           bool ControlsExit,
7889                                           bool AllowPredicates) {
7890   // If the condition was exit on true, convert the condition to exit on false.
7891   ICmpInst::Predicate Pred;
7892   if (!ExitIfTrue)
7893     Pred = ExitCond->getPredicate();
7894   else
7895     Pred = ExitCond->getInversePredicate();
7896   const ICmpInst::Predicate OriginalPred = Pred;
7897
7898   // Handle common loops like: for (X = "string"; *X; ++X)
7899   if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
7900     if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
7901       ExitLimit ItCnt =
7902           computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
7903       if (ItCnt.hasAnyInfo())
7904         return ItCnt;
7905     }
7906
7907   const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
7908   const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
7909
7910   // Try to evaluate any dependencies out of the loop.
7911   LHS = getSCEVAtScope(LHS, L);
7912   RHS = getSCEVAtScope(RHS, L);
7913
7914   // At this point, we would like to compute how many iterations of the
7915   // loop the predicate will return true for these inputs.
7916   if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
7917     // If LHS is loop-invariant and RHS is not, force the invariant into the RHS.
7918     std::swap(LHS, RHS);
7919     Pred = ICmpInst::getSwappedPredicate(Pred);
7920   }
7921
7922   // Simplify the operands before analyzing them.
7923   (void)SimplifyICmpOperands(Pred, LHS, RHS);
7924
7925   // If we have a comparison of a chrec against a constant, try to use value
7926   // ranges to answer this query.
7927   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
7928     if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
7929       if (AddRec->getLoop() == L) {
7930         // Form the constant range.
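        // An assumed example for illustration: with Pred == ICMP_SLT and a
        // 32-bit RHS constant of 100, makeExactICmpRegion yields the set of
        // values that are <s 100, i.e. the signed range [INT32_MIN, 100), and
        // getNumIterationsInRange then counts how many iterations the
        // AddRec's value stays inside that range before first leaving it.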
7931 ConstantRange CompRange = 7932 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7933 7934 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7935 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7936 } 7937 7938 switch (Pred) { 7939 case ICmpInst::ICMP_NE: { // while (X != Y) 7940 // Convert to: while (X-Y != 0) 7941 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7942 AllowPredicates); 7943 if (EL.hasAnyInfo()) return EL; 7944 break; 7945 } 7946 case ICmpInst::ICMP_EQ: { // while (X == Y) 7947 // Convert to: while (X-Y == 0) 7948 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7949 if (EL.hasAnyInfo()) return EL; 7950 break; 7951 } 7952 case ICmpInst::ICMP_SLT: 7953 case ICmpInst::ICMP_ULT: { // while (X < Y) 7954 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7955 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7956 AllowPredicates); 7957 if (EL.hasAnyInfo()) return EL; 7958 break; 7959 } 7960 case ICmpInst::ICMP_SGT: 7961 case ICmpInst::ICMP_UGT: { // while (X > Y) 7962 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7963 ExitLimit EL = 7964 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7965 AllowPredicates); 7966 if (EL.hasAnyInfo()) return EL; 7967 break; 7968 } 7969 default: 7970 break; 7971 } 7972 7973 auto *ExhaustiveCount = 7974 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7975 7976 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7977 return ExhaustiveCount; 7978 7979 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7980 ExitCond->getOperand(1), L, OriginalPred); 7981 } 7982 7983 ScalarEvolution::ExitLimit 7984 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7985 SwitchInst *Switch, 7986 BasicBlock *ExitingBlock, 7987 bool ControlsExit) { 7988 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7989 7990 // Give up if the exit is the default dest of a switch. 7991 if (Switch->getDefaultDest() == ExitingBlock) 7992 return getCouldNotCompute(); 7993 7994 assert(L->contains(Switch->getDefaultDest()) && 7995 "Default case must not exit the loop!"); 7996 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7997 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7998 7999 // while (X != Y) --> while (X-Y != 0) 8000 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 8001 if (EL.hasAnyInfo()) 8002 return EL; 8003 8004 return getCouldNotCompute(); 8005 } 8006 8007 static ConstantInt * 8008 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 8009 ScalarEvolution &SE) { 8010 const SCEV *InVal = SE.getConstant(C); 8011 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 8012 assert(isa<SCEVConstant>(Val) && 8013 "Evaluation of SCEV at constant didn't fold correctly?"); 8014 return cast<SCEVConstant>(Val)->getValue(); 8015 } 8016 8017 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 8018 /// compute the backedge execution count. 8019 ScalarEvolution::ExitLimit 8020 ScalarEvolution::computeLoadConstantCompareExitLimit( 8021 LoadInst *LI, 8022 Constant *RHS, 8023 const Loop *L, 8024 ICmpInst::Predicate predicate) { 8025 if (LI->isVolatile()) return getCouldNotCompute(); 8026 8027 // Check to see if the loaded pointer is a getelementptr of a global. 8028 // TODO: Use SCEV instead of manually grubbing with GEPs. 
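  // The classic pattern this handles looks like (an illustrative example,
  // not from the original comments):
  //
  //   static const int Table[] = {9, 7, 5, 3, 0};
  //   for (i = 0; Table[i] != 0; ++i) { ... }
  //
  // where the exit test is 'icmp ne (load (gep @Table, 0, %i)), 0'. The code
  // below evaluates the load at successive constant indices until the
  // comparison comes out false -- here at i == 4 -- which identifies the
  // backedge-taken count.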
8029   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
8030   if (!GEP) return getCouldNotCompute();
8031
8032   // Make sure that it is really a constant global we are gepping, with an
8033   // initializer, and make sure the first IDX is really 0.
8034   GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
8035   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
8036       GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
8037       !cast<Constant>(GEP->getOperand(1))->isNullValue())
8038     return getCouldNotCompute();
8039
8040   // Okay, we allow one non-constant index into the GEP instruction.
8041   Value *VarIdx = nullptr;
8042   std::vector<Constant*> Indexes;
8043   unsigned VarIdxNum = 0;
8044   for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
8045     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
8046       Indexes.push_back(CI);
8047     } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
8048       if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
8049       VarIdx = GEP->getOperand(i);
8050       VarIdxNum = i-2;
8051       Indexes.push_back(nullptr);
8052     }
8053
8054   // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
8055   if (!VarIdx)
8056     return getCouldNotCompute();
8057
8058   // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
8059   // Check to see if X is a loop-variant value now.
8060   const SCEV *Idx = getSCEV(VarIdx);
8061   Idx = getSCEVAtScope(Idx, L);
8062
8063   // We can only recognize very limited forms of loop index expressions, in
8064   // particular, only affine AddRec's like {C1,+,C2}<L>.
8065   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
8066   if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
8067       isLoopInvariant(IdxExpr, L) ||
8068       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
8069       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
8070     return getCouldNotCompute();
8071
8072   unsigned MaxSteps = MaxBruteForceIterations;
8073   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
8074     ConstantInt *ItCst = ConstantInt::get(
8075         cast<IntegerType>(IdxExpr->getType()), IterationNum);
8076     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
8077
8078     // Form the GEP offset.
8079     Indexes[VarIdxNum] = Val;
8080
8081     Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
8082                                                          Indexes);
8083     if (!Result) break;  // Cannot compute!
8084
8085     // Evaluate the condition for this iteration.
8086     Result = ConstantExpr::getICmp(predicate, Result, RHS);
8087     if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
8088     if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
8089       ++NumArrayLenItCounts;
8090       return getConstant(ItCst);   // Found terminating iteration!
8091     }
8092   }
8093   return getCouldNotCompute();
8094 }
8095
8096 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
8097     Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
8098   ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
8099   if (!RHS)
8100     return getCouldNotCompute();
8101
8102   const BasicBlock *Latch = L->getLoopLatch();
8103   if (!Latch)
8104     return getCouldNotCompute();
8105
8106   const BasicBlock *Predecessor = L->getLoopPredecessor();
8107   if (!Predecessor)
8108     return getCouldNotCompute();
8109
8110   // Return true if V is of the form "LHS `shift_op` <positive constant>".
8111   // Return LHS in OutLHS and shift_op in OutOpCode.
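  // For example (illustrative): 'lshr i32 %x, 3' matches with OutLHS = %x and
  // OutOpCode = Instruction::LShr, whereas 'lshr i32 %x, 0' is rejected
  // because the shift amount must be strictly positive.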
8112   auto MatchPositiveShift =
8113       [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
8114
8115     using namespace PatternMatch;
8116
8117     ConstantInt *ShiftAmt;
8118     if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8119       OutOpCode = Instruction::LShr;
8120     else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8121       OutOpCode = Instruction::AShr;
8122     else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8123       OutOpCode = Instruction::Shl;
8124     else
8125       return false;
8126
8127     return ShiftAmt->getValue().isStrictlyPositive();
8128   };
8129
8130   // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
8131   //
8132   // loop:
8133   //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
8134   //   %iv.shifted = lshr i32 %iv, <positive constant>
8135   //
8136   // Return true on a successful match.  Return the corresponding PHI node (%iv
8137   // above) in PNOut and the opcode of the shift operation in OpCodeOut.
8138   auto MatchShiftRecurrence =
8139       [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
8140     Optional<Instruction::BinaryOps> PostShiftOpCode;
8141
8142     {
8143       Instruction::BinaryOps OpC;
8144       Value *V;
8145
8146       // If we encounter a shift instruction, "peel off" the shift operation,
8147       // and remember that we did so. Later when we inspect %iv's backedge
8148       // value, we will make sure that the backedge value uses the same
8149       // operation.
8150       //
8151       // Note: the peeled shift operation does not have to be the same
8152       // instruction as the one feeding into the PHI's backedge value. We only
8153       // really care about it being the same *kind* of shift instruction --
8154       // that's all that is required for our later inferences to hold.
8155       if (MatchPositiveShift(LHS, V, OpC)) {
8156         PostShiftOpCode = OpC;
8157         LHS = V;
8158       }
8159     }
8160
8161     PNOut = dyn_cast<PHINode>(LHS);
8162     if (!PNOut || PNOut->getParent() != L->getHeader())
8163       return false;
8164
8165     Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
8166     Value *OpLHS;
8167
8168     return
8169         // The backedge value for the PHI node must be a shift by a positive
8170         // amount
8171         MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
8172
8173         // of the PHI node itself
8174         OpLHS == PNOut &&
8175
8176         // and the kind of shift must match the kind of shift we peeled
8177         // off, if any.
8178         (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
8179   };
8180
8181   PHINode *PN;
8182   Instruction::BinaryOps OpCode;
8183   if (!MatchShiftRecurrence(LHS, PN, OpCode))
8184     return getCouldNotCompute();
8185
8186   const DataLayout &DL = getDataLayout();
8187
8188   // The key rationale for this optimization is that for some kinds of shift
8189   // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
8190   // within a finite number of iterations. If the condition guarding the
8191   // backedge (in the sense that the backedge is taken if the condition is true)
8192   // is false for the value the shift recurrence stabilizes to, then we know
8193   // that the backedge is taken only a finite number of times.
8194
8195   ConstantInt *StableValue = nullptr;
8196   switch (OpCode) {
8197   default:
8198     llvm_unreachable("Impossible case!");
8199
8200   case Instruction::AShr: {
8201     // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and
8202     // to -1 if K is negative, in at most bitwidth(K) iterations.
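    // A worked example (illustrative): {-20,ashr,2} evaluates to
    // -20, -5, -2, -1, -1, ... while {7,ashr,1} evaluates to 7, 3, 1, 0, 0,
    // ... so a negative start stabilizes to -1 and a non-negative one to 0.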
8203 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 8204 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, 8205 Predecessor->getTerminator(), &DT); 8206 auto *Ty = cast<IntegerType>(RHS->getType()); 8207 if (Known.isNonNegative()) 8208 StableValue = ConstantInt::get(Ty, 0); 8209 else if (Known.isNegative()) 8210 StableValue = ConstantInt::get(Ty, -1, true); 8211 else 8212 return getCouldNotCompute(); 8213 8214 break; 8215 } 8216 case Instruction::LShr: 8217 case Instruction::Shl: 8218 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 8219 // stabilize to 0 in at most bitwidth(K) iterations. 8220 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 8221 break; 8222 } 8223 8224 auto *Result = 8225 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 8226 assert(Result->getType()->isIntegerTy(1) && 8227 "Otherwise cannot be an operand to a branch instruction"); 8228 8229 if (Result->isZeroValue()) { 8230 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8231 const SCEV *UpperBound = 8232 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 8233 return ExitLimit(getCouldNotCompute(), UpperBound, false); 8234 } 8235 8236 return getCouldNotCompute(); 8237 } 8238 8239 /// Return true if we can constant fold an instruction of the specified type, 8240 /// assuming that all operands were constants. 8241 static bool CanConstantFold(const Instruction *I) { 8242 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 8243 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 8244 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 8245 return true; 8246 8247 if (const CallInst *CI = dyn_cast<CallInst>(I)) 8248 if (const Function *F = CI->getCalledFunction()) 8249 return canConstantFoldCallTo(CI, F); 8250 return false; 8251 } 8252 8253 /// Determine whether this instruction can constant evolve within this loop 8254 /// assuming its operands can all constant evolve. 8255 static bool canConstantEvolve(Instruction *I, const Loop *L) { 8256 // An instruction outside of the loop can't be derived from a loop PHI. 8257 if (!L->contains(I)) return false; 8258 8259 if (isa<PHINode>(I)) { 8260 // We don't currently keep track of the control flow needed to evaluate 8261 // PHIs, so we cannot handle PHIs inside of loops. 8262 return L->getHeader() == I->getParent(); 8263 } 8264 8265 // If we won't be able to constant fold this expression even if the operands 8266 // are constants, bail early. 8267 return CanConstantFold(I); 8268 } 8269 8270 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 8271 /// recursing through each instruction operand until reaching a loop header phi. 8272 static PHINode * 8273 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 8274 DenseMap<Instruction *, PHINode *> &PHIMap, 8275 unsigned Depth) { 8276 if (Depth > MaxConstantEvolvingDepth) 8277 return nullptr; 8278 8279 // Otherwise, we can evaluate this instruction if all of its operands are 8280 // constant or derived from a PHI node themselves. 8281 PHINode *PHI = nullptr; 8282 for (Value *Op : UseInst->operands()) { 8283 if (isa<Constant>(Op)) continue; 8284 8285 Instruction *OpInst = dyn_cast<Instruction>(Op); 8286 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 8287 8288 PHINode *P = dyn_cast<PHINode>(OpInst); 8289 if (!P) 8290 // If this operand is already visited, reuse the prior result. 8291 // We may have P != PHI if this is the deepest point at which the 8292 // inconsistent paths meet. 
8293       P = PHIMap.lookup(OpInst);
8294     if (!P) {
8295       // Recurse and memoize the results, whether a phi is found or not.
8296       // This recursive call invalidates pointers into PHIMap.
8297       P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8298       PHIMap[OpInst] = P;
8299     }
8300     if (!P)
8301       return nullptr;  // Not evolving from PHI
8302     if (PHI && PHI != P)
8303       return nullptr;  // Evolving from multiple different PHIs.
8304     PHI = P;
8305   }
8306   // This is an expression evolving from a constant PHI!
8307   return PHI;
8308 }
8309
8310 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8311 /// in the loop that V is derived from.  We allow arbitrary operations along the
8312 /// way, but the operands of an operation must either be constants or a value
8313 /// derived from a constant PHI.  If this expression does not fit with these
8314 /// constraints, return null.
8315 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8316   Instruction *I = dyn_cast<Instruction>(V);
8317   if (!I || !canConstantEvolve(I, L)) return nullptr;
8318
8319   if (PHINode *PN = dyn_cast<PHINode>(I))
8320     return PN;
8321
8322   // Record non-constant instructions contained by the loop.
8323   DenseMap<Instruction *, PHINode *> PHIMap;
8324   return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
8325 }
8326
8327 /// EvaluateExpression - Given an expression that passes the
8328 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8329 /// in the loop has the value PHIVal.  If we can't fold this expression for some
8330 /// reason, return null.
8331 static Constant *EvaluateExpression(Value *V, const Loop *L,
8332                                     DenseMap<Instruction *, Constant *> &Vals,
8333                                     const DataLayout &DL,
8334                                     const TargetLibraryInfo *TLI) {
8335   // Convenient constant check, but redundant for recursive calls.
8336   if (Constant *C = dyn_cast<Constant>(V)) return C;
8337   Instruction *I = dyn_cast<Instruction>(V);
8338   if (!I) return nullptr;
8339
8340   if (Constant *C = Vals.lookup(I)) return C;
8341
8342   // An instruction inside the loop depends on a value outside the loop that we
8343   // weren't given a mapping for, or a value such as a call inside the loop.
8344   if (!canConstantEvolve(I, L)) return nullptr;
8345
8346   // An unmapped PHI can be due to a branch or another loop inside this loop,
8347   // or due to this not being the initial iteration through a loop where we
8348   // couldn't compute the evolution of this particular PHI last time.
8349   if (isa<PHINode>(I)) return nullptr;
8350
8351   std::vector<Constant*> Operands(I->getNumOperands());
8352
8353   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8354     Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8355     if (!Operand) {
8356       Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8357       if (!Operands[i]) return nullptr;
8358       continue;
8359     }
8360     Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8361     Vals[Operand] = C;
8362     if (!C) return nullptr;
8363     Operands[i] = C;
8364   }
8365
8366   if (CmpInst *CI = dyn_cast<CmpInst>(I))
8367     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8368                                            Operands[1], DL, TLI);
8369   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8370     if (!LI->isVolatile())
8371       return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8372   }
8373   return ConstantFoldInstOperands(I, Operands, DL, TLI);
8374 }
8375
8376
8377 // If every incoming value to PN except the one for BB is a specific Constant,
8378 // return that, else return nullptr.
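// For example (illustrative): given
//
//   %pn = phi i32 [ 5, %preheader ], [ %next, %latch ]
//
// getOtherIncomingValue(%pn, %latch) returns the i32 constant 5; it returns
// nullptr if the non-latch incoming values disagree or are not constants.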
8379 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 8380 Constant *IncomingVal = nullptr; 8381 8382 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 8383 if (PN->getIncomingBlock(i) == BB) 8384 continue; 8385 8386 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 8387 if (!CurrentVal) 8388 return nullptr; 8389 8390 if (IncomingVal != CurrentVal) { 8391 if (IncomingVal) 8392 return nullptr; 8393 IncomingVal = CurrentVal; 8394 } 8395 } 8396 8397 return IncomingVal; 8398 } 8399 8400 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 8401 /// in the header of its containing loop, we know the loop executes a 8402 /// constant number of times, and the PHI node is just a recurrence 8403 /// involving constants, fold it. 8404 Constant * 8405 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 8406 const APInt &BEs, 8407 const Loop *L) { 8408 auto I = ConstantEvolutionLoopExitValue.find(PN); 8409 if (I != ConstantEvolutionLoopExitValue.end()) 8410 return I->second; 8411 8412 if (BEs.ugt(MaxBruteForceIterations)) 8413 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 8414 8415 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 8416 8417 DenseMap<Instruction *, Constant *> CurrentIterVals; 8418 BasicBlock *Header = L->getHeader(); 8419 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8420 8421 BasicBlock *Latch = L->getLoopLatch(); 8422 if (!Latch) 8423 return nullptr; 8424 8425 for (PHINode &PHI : Header->phis()) { 8426 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8427 CurrentIterVals[&PHI] = StartCST; 8428 } 8429 if (!CurrentIterVals.count(PN)) 8430 return RetVal = nullptr; 8431 8432 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8433 8434 // Execute the loop symbolically to determine the exit value. 8435 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8436 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8437 8438 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8439 unsigned IterationNum = 0; 8440 const DataLayout &DL = getDataLayout(); 8441 for (; ; ++IterationNum) { 8442 if (IterationNum == NumIterations) 8443 return RetVal = CurrentIterVals[PN]; // Got exit value! 8444 8445 // Compute the value of the PHIs for the next iteration. 8446 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 8447 DenseMap<Instruction *, Constant *> NextIterVals; 8448 Constant *NextPHI = 8449 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8450 if (!NextPHI) 8451 return nullptr; // Couldn't evaluate! 8452 NextIterVals[PN] = NextPHI; 8453 8454 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 8455 8456 // Also evaluate the other PHI nodes. However, we don't get to stop if we 8457 // cease to be able to evaluate one of them or if they stop evolving, 8458 // because that doesn't necessarily prevent us from computing PN. 8459 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 8460 for (const auto &I : CurrentIterVals) { 8461 PHINode *PHI = dyn_cast<PHINode>(I.first); 8462 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 8463 PHIsToCompute.emplace_back(PHI, I.second); 8464 } 8465 // We use two distinct loops because EvaluateExpression may invalidate any 8466 // iterators into CurrentIterVals. 
8467   for (const auto &I : PHIsToCompute) {
8468     PHINode *PHI = I.first;
8469     Constant *&NextPHI = NextIterVals[PHI];
8470     if (!NextPHI) {   // Not already computed.
8471       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8472       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8473     }
8474     if (NextPHI != I.second)
8475       StoppedEvolving = false;
8476   }
8477
8478   // If all entries in CurrentIterVals == NextIterVals then we can stop
8479   // iterating; the loop can't continue to change.
8480   if (StoppedEvolving)
8481     return RetVal = CurrentIterVals[PN];
8482
8483   CurrentIterVals.swap(NextIterVals);
8484   }
8485 }
8486
8487 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8488                                                           Value *Cond,
8489                                                           bool ExitWhen) {
8490   PHINode *PN = getConstantEvolvingPHI(Cond, L);
8491   if (!PN) return getCouldNotCompute();
8492
8493   // If the loop is canonicalized, the PHI will have exactly two entries.
8494   // That's the only form we support here.
8495   if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8496
8497   DenseMap<Instruction *, Constant *> CurrentIterVals;
8498   BasicBlock *Header = L->getHeader();
8499   assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8500
8501   BasicBlock *Latch = L->getLoopLatch();
8502   assert(Latch && "Should follow from NumIncomingValues == 2!");
8503
8504   for (PHINode &PHI : Header->phis()) {
8505     if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8506       CurrentIterVals[&PHI] = StartCST;
8507   }
8508   if (!CurrentIterVals.count(PN))
8509     return getCouldNotCompute();
8510
8511   // Okay, we found a PHI node that defines the trip count of this loop. Execute
8512   // the loop symbolically to determine when the condition gets a value of
8513   // "ExitWhen".
8514   unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
8515   const DataLayout &DL = getDataLayout();
8516   for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
8517     auto *CondVal = dyn_cast_or_null<ConstantInt>(
8518         EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8519
8520     // Couldn't symbolically evaluate.
8521     if (!CondVal) return getCouldNotCompute();
8522
8523     if (CondVal->getValue() == uint64_t(ExitWhen)) {
8524       ++NumBruteForceTripCountsComputed;
8525       return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8526     }
8527
8528     // Update all the PHI nodes for the next iteration.
8529     DenseMap<Instruction *, Constant *> NextIterVals;
8530
8531     // Create a list of which PHIs we need to compute. We want to do this before
8532     // calling EvaluateExpression on them because that may invalidate iterators
8533     // into CurrentIterVals.
8534     SmallVector<PHINode *, 8> PHIsToCompute;
8535     for (const auto &I : CurrentIterVals) {
8536       PHINode *PHI = dyn_cast<PHINode>(I.first);
8537       if (!PHI || PHI->getParent() != Header) continue;
8538       PHIsToCompute.push_back(PHI);
8539     }
8540     for (PHINode *PHI : PHIsToCompute) {
8541       Constant *&NextPHI = NextIterVals[PHI];
8542       if (NextPHI) continue;    // Already computed!
8543
8544       Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8545       NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8546     }
8547     CurrentIterVals.swap(NextIterVals);
8548   }
8549
8550   // Too many iterations were needed to evaluate.
8551 return getCouldNotCompute(); 8552 } 8553 8554 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8555 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8556 ValuesAtScopes[V]; 8557 // Check to see if we've folded this expression at this loop before. 8558 for (auto &LS : Values) 8559 if (LS.first == L) 8560 return LS.second ? LS.second : V; 8561 8562 Values.emplace_back(L, nullptr); 8563 8564 // Otherwise compute it. 8565 const SCEV *C = computeSCEVAtScope(V, L); 8566 for (auto &LS : reverse(ValuesAtScopes[V])) 8567 if (LS.first == L) { 8568 LS.second = C; 8569 break; 8570 } 8571 return C; 8572 } 8573 8574 /// This builds up a Constant using the ConstantExpr interface. That way, we 8575 /// will return Constants for objects which aren't represented by a 8576 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8577 /// Returns NULL if the SCEV isn't representable as a Constant. 8578 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8579 switch (V->getSCEVType()) { 8580 case scCouldNotCompute: 8581 case scAddRecExpr: 8582 return nullptr; 8583 case scConstant: 8584 return cast<SCEVConstant>(V)->getValue(); 8585 case scUnknown: 8586 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8587 case scSignExtend: { 8588 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8589 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8590 return ConstantExpr::getSExt(CastOp, SS->getType()); 8591 return nullptr; 8592 } 8593 case scZeroExtend: { 8594 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8595 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8596 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8597 return nullptr; 8598 } 8599 case scPtrToInt: { 8600 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 8601 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 8602 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 8603 8604 return nullptr; 8605 } 8606 case scTruncate: { 8607 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8608 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8609 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8610 return nullptr; 8611 } 8612 case scAddExpr: { 8613 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8614 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8615 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8616 unsigned AS = PTy->getAddressSpace(); 8617 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8618 C = ConstantExpr::getBitCast(C, DestPtrTy); 8619 } 8620 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8621 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8622 if (!C2) 8623 return nullptr; 8624 8625 // First pointer! 8626 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8627 unsigned AS = C2->getType()->getPointerAddressSpace(); 8628 std::swap(C, C2); 8629 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8630 // The offsets have been converted to bytes. We can add bytes to an 8631 // i8* by GEP with the byte count in the first index. 8632 C = ConstantExpr::getBitCast(C, DestPtrTy); 8633 } 8634 8635 // Don't bother trying to sum two pointers. We probably can't 8636 // statically compute a load that results from it anyway. 
8637         if (C2->getType()->isPointerTy())
8638           return nullptr;
8639
8640         if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8641           if (PTy->getElementType()->isStructTy())
8642             C2 = ConstantExpr::getIntegerCast(
8643                 C2, Type::getInt32Ty(C->getContext()), true);
8644           C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
8645         } else
8646           C = ConstantExpr::getAdd(C, C2);
8647       }
8648       return C;
8649     }
8650     return nullptr;
8651   }
8652   case scMulExpr: {
8653     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8654     if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8655       // Don't bother with pointers at all.
8656       if (C->getType()->isPointerTy())
8657         return nullptr;
8658       for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8659         Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8660         if (!C2 || C2->getType()->isPointerTy())
8661           return nullptr;
8662         C = ConstantExpr::getMul(C, C2);
8663       }
8664       return C;
8665     }
8666     return nullptr;
8667   }
8668   case scUDivExpr: {
8669     const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8670     if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8671       if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8672         if (LHS->getType() == RHS->getType())
8673           return ConstantExpr::getUDiv(LHS, RHS);
8674     return nullptr;
8675   }
8676   case scSMaxExpr:
8677   case scUMaxExpr:
8678   case scSMinExpr:
8679   case scUMinExpr:
8680     return nullptr; // TODO: smax, umax, smin, umin.
8681   }
8682   llvm_unreachable("Unknown SCEV kind!");
8683 }
8684
8685 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8686   if (isa<SCEVConstant>(V)) return V;
8687
8688   // If this instruction is evolved from a constant-evolving PHI, compute the
8689   // exit value from the loop without using SCEVs.
8690   if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8691     if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8692       if (PHINode *PN = dyn_cast<PHINode>(I)) {
8693         const Loop *CurrLoop = this->LI[I->getParent()];
8694         // Looking for loop exit value.
8695         if (CurrLoop && CurrLoop->getParentLoop() == L &&
8696             PN->getParent() == CurrLoop->getHeader()) {
8697           // Okay, there is no closed form solution for the PHI node. Check
8698           // to see if the loop that contains it has a known backedge-taken
8699           // count. If so, we may be able to force computation of the exit
8700           // value.
8701           const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8702           // This trivial case can show up in some degenerate cases where
8703           // the incoming IR has not yet been fully simplified.
8704           if (BackedgeTakenCount->isZero()) {
8705             Value *InitValue = nullptr;
8706             bool MultipleInitValues = false;
8707             for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8708               if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
8709                 if (!InitValue)
8710                   InitValue = PN->getIncomingValue(i);
8711                 else if (InitValue != PN->getIncomingValue(i)) {
8712                   MultipleInitValues = true;
8713                   break;
8714                 }
8715               }
8716             }
8717             if (!MultipleInitValues && InitValue)
8718               return getSCEV(InitValue);
8719           }
8720           // Do we have a loop invariant value flowing around the backedge
8721           // for a loop which must execute the backedge?
8722           if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
8723               isKnownPositive(BackedgeTakenCount) &&
8724               PN->getNumIncomingValues() == 2) {
8725
8726             unsigned InLoopPred =
8727                 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
8728             Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
8729             if (CurrLoop->isLoopInvariant(BackedgeVal))
8730               return getSCEV(BackedgeVal);
8731           }
8732           if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8733             // Okay, we know how many times the containing loop executes. If
8734             // this is a constant evolving PHI node, get the final value at
8735             // the specified iteration number.
8736             Constant *RV = getConstantEvolutionLoopExitValue(
8737                 PN, BTCC->getAPInt(), CurrLoop);
8738             if (RV) return getSCEV(RV);
8739           }
8740         }
8741
8742         // If there is a single-input Phi, evaluate it at our scope. If we can
8743         // prove that this replacement does not break LCSSA form, use the new value.
8744         if (PN->getNumOperands() == 1) {
8745           const SCEV *Input = getSCEV(PN->getOperand(0));
8746           const SCEV *InputAtScope = getSCEVAtScope(Input, L);
8747           // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
8748           // for the simplest case just support constants.
8749           if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
8750         }
8751       }
8752
8753       // Okay, this is an expression that we cannot symbolically evaluate
8754       // into a SCEV. Check to see if it's possible to symbolically evaluate
8755       // the arguments into constants, and if so, try to constant propagate the
8756       // result. This is particularly useful for computing loop exit values.
8757       if (CanConstantFold(I)) {
8758         SmallVector<Constant *, 4> Operands;
8759         bool MadeImprovement = false;
8760         for (Value *Op : I->operands()) {
8761           if (Constant *C = dyn_cast<Constant>(Op)) {
8762             Operands.push_back(C);
8763             continue;
8764           }
8765
8766           // If any operand is non-constant and not SCEVable (i.e., neither
8767           // integer nor pointer typed), don't even try to analyze it with
8768           // SCEV techniques.
8769           if (!isSCEVable(Op->getType()))
8770             return V;
8771
8772           const SCEV *OrigV = getSCEV(Op);
8773           const SCEV *OpV = getSCEVAtScope(OrigV, L);
8774           MadeImprovement |= OrigV != OpV;
8775
8776           Constant *C = BuildConstantFromSCEV(OpV);
8777           if (!C) return V;
8778           if (C->getType() != Op->getType())
8779             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8780                                                               Op->getType(),
8781                                                               false),
8782                                       C, Op->getType());
8783           Operands.push_back(C);
8784         }
8785
8786         // Check to see if getSCEVAtScope actually made an improvement.
8787         if (MadeImprovement) {
8788           Constant *C = nullptr;
8789           const DataLayout &DL = getDataLayout();
8790           if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8791             C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8792                                                 Operands[1], DL, &TLI);
8793           else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
8794             if (!Load->isVolatile())
8795               C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
8796                                                DL);
8797           } else
8798             C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8799           if (!C) return V;
8800           return getSCEV(C);
8801         }
8802       }
8803     }
8804
8805     // This is some other type of SCEVUnknown; just return it.
8806     return V;
8807   }
8808
8809   if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8810     // Avoid performing the look-up in the common case where the specified
8811     // expression has no loop-variant portions.
8812     for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8813       const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8814       if (OpAtScope != Comm->getOperand(i)) {
8815         // Okay, at least one of these operands is loop variant but might be
8816         // foldable. Build a new instance of the folded commutative expression.
8817 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 8818 Comm->op_begin()+i); 8819 NewOps.push_back(OpAtScope); 8820 8821 for (++i; i != e; ++i) { 8822 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 8823 NewOps.push_back(OpAtScope); 8824 } 8825 if (isa<SCEVAddExpr>(Comm)) 8826 return getAddExpr(NewOps, Comm->getNoWrapFlags()); 8827 if (isa<SCEVMulExpr>(Comm)) 8828 return getMulExpr(NewOps, Comm->getNoWrapFlags()); 8829 if (isa<SCEVMinMaxExpr>(Comm)) 8830 return getMinMaxExpr(Comm->getSCEVType(), NewOps); 8831 llvm_unreachable("Unknown commutative SCEV type!"); 8832 } 8833 } 8834 // If we got here, all operands are loop invariant. 8835 return Comm; 8836 } 8837 8838 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 8839 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 8840 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 8841 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 8842 return Div; // must be loop invariant 8843 return getUDivExpr(LHS, RHS); 8844 } 8845 8846 // If this is a loop recurrence for a loop that does not contain L, then we 8847 // are dealing with the final value computed by the loop. 8848 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 8849 // First, attempt to evaluate each operand. 8850 // Avoid performing the look-up in the common case where the specified 8851 // expression has no loop-variant portions. 8852 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 8853 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 8854 if (OpAtScope == AddRec->getOperand(i)) 8855 continue; 8856 8857 // Okay, at least one of these operands is loop variant but might be 8858 // foldable. Build a new instance of the folded commutative expression. 8859 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 8860 AddRec->op_begin()+i); 8861 NewOps.push_back(OpAtScope); 8862 for (++i; i != e; ++i) 8863 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 8864 8865 const SCEV *FoldedRec = 8866 getAddRecExpr(NewOps, AddRec->getLoop(), 8867 AddRec->getNoWrapFlags(SCEV::FlagNW)); 8868 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec); 8869 // The addrec may be folded to a nonrecurrence, for example, if the 8870 // induction variable is multiplied by zero after constant folding. Go 8871 // ahead and return the folded value. 8872 if (!AddRec) 8873 return FoldedRec; 8874 break; 8875 } 8876 8877 // If the scope is outside the addrec's loop, evaluate it by using the 8878 // loop exit value of the addrec. 8879 if (!AddRec->getLoop()->contains(L)) { 8880 // To evaluate this recurrence, we need to know how many times the AddRec 8881 // loop iterates. Compute this now. 8882 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 8883 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 8884 8885 // Then, evaluate the AddRec. 
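      // As an illustrative (hypothetical) example: for the affine addrec
      // {5,+,3} and a backedge-taken count of 7, evaluateAtIteration returns
      // the closed form 5 + 3*7 = 26, the value the recurrence holds after
      // the loop's final backedge.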
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getPtrToIntExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N can have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of that prime factor in D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2); // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //      I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //      (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.
  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit width.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist; in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find
///     it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C,
                                                           BitWidth + 1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M and N are assumed to be signed, and they
/// should both have the same bit width.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
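///
/// As an illustrative (hypothetical) example: for the chrec {0,+,1,+,2},
/// i.e. c(n) = n^2, and the range [0, 20), the answer is n = 5, since
/// c(4) = 16 lies in the range while c(5) = 25 does not.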
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary =
      [&](APInt Bound) -> std::pair<Optional<APInt>, bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&](const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X - 1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // The lower bound is inclusive; subtract 1 to represent the exiting value.
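  // As an illustrative (hypothetical) example, for the range [0, 20) the
  // two boundary equations solved below are c(n) = -1 and c(n) = 20 (scaled
  // by the common multiplier M): the first iteration whose value lies
  // outside the range must cross one of those two boundaries, and
  // LeavesRange then checks the candidate against the actual chrec values.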
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If either of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such a case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0. We know and take advantage of the fact that
  // this expression is only used in a compare-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant:
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the AddRec is NUW,
  // then (in an unsigned sense) it cannot be counting up to wrap to 0; it
  // must be counting down to equal 0. Consequently, N = Start / -Step. We
  // have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wrap around.
  //   1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
    APInt MaxBECountBase = getUnsignedRangeMax(Distance);
    if (MaxBECountBase.ult(MaxBECount))
      MaxBECount = MaxBECountBase;

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
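    // For instance (hypothetical IR), after rotation such a loop looks like:
    //   guard: if (n == 0) goto exit;
    //   body:  i = phi [0, guard], [i+1, body]
    //          ...
    //          if (i+1 != n) goto body; else goto exit;
    // Here Distance is n-1, and the entry guard proves Distance+1 = n != 0,
    // so unsigned_max(n) - 1 is a valid bound on the backedge-taken count.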
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne,
                                 Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum
      // distance as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the
  // expression is true) and the addition is no-wrap, we can use unsigned
  // divide to compute the backedge count. In this case, the step need not
  // divide the distance evenly, but we don't care, because if the condition
  // is "missed" the loop will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max = getCouldNotCompute();
    if (Exact != getCouldNotCompute()) {
      APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
      APInt BaseMaxInt = getUnsignedRangeMax(Exact);
      if (BaseMaxInt.ult(MaxInt))
        Max = getConstant(BaseMaxInt);
      else
        Max = getConstant(MaxInt);
    }
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
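  // For example, if BB is a loop header whose only out-of-loop predecessor
  // is a preheader P, every path into the loop enters through the edge
  // P -> BB, so a condition controlling that edge guards the entire loop.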
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal; however, for the purposes of looking for a
/// condition guarding a loop, it can be useful to be a little more general,
/// since a front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value. For
    // instance, two distinct alloca instructions allocating the same type
    // are identical and do not read memory, but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit, bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
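  // For example (hypothetical operands), "x u< 1" has the exact unsigned
  // range [0, 1) for x and so becomes the equality "x == 0", while "x u>= 5"
  // falls through to the switch below and becomes the strict comparison
  // "x u> 4".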
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into
        // %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;

      // The "Should have been caught earlier!" messages refer to the fact
      // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
      // should have fired on the corresponding cases, and canonicalized the
      // check to a trivial case.
      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
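  // For instance, "x s<= y" becomes "x s< y+1" when y is known not to be
  // SINT_MAX, or "x + (-1) s< y" when x is known not to be SINT_MIN; the
  // range checks below ensure the adjusted operand cannot wrap.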
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
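  // As an illustrative (hypothetical) example of the overall approach:
  // proving {1,+,1}<L> s> {0,+,1}<L> splits both sides at L into
  // (init, post-inc) pairs (1, {2,+,1}) and (0, {1,+,1}); it then suffices
  // that 1 s> 0 holds on loop entry and that the post-increment comparison
  // is guarded by the backedge.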
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get the init and post-increment values for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV, bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get the init and post-increment values for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV, bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and hence is not available at MDL's loop entry, so we
  // must check for that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // The backedge guard check appears to be faster than the loop-entry one,
  // so do it first; in some cases this lets the whole estimate
  // short-circuit.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
                                                  const SCEV *LHS,
                                                  const SCEV *RHS) {
  if (isKnownPredicate(Pred, LHS, RHS))
    return true;
  else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
    return false;
  return None;
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *Context) {
  // TODO: Analyze guards and assumes from Context's block.
  return isKnownPredicate(Pred, LHS, RHS) ||
         isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
}

Optional<bool>
ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
                                     const SCEV *RHS,
                                     const Instruction *Context) {
  Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
  if (KnownWithoutContext)
    return KnownWithoutContext;

  if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS))
    return true;
  else if (isBasicBlockEntryGuardedByCond(Context->getParent(),
                                          ICmpInst::getInversePredicate(Pred),
                                          LHS, RHS))
    return false;
  return None;
}

bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred) {
  auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);

#ifndef NDEBUG
  // Verify an invariant: swapping the predicate should turn a monotonically
  // increasing change into a monotonically decreasing one, and vice versa.
  if (Result) {
    auto ResultSwapped =
        getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));

    assert(ResultSwapped.hasValue() && "should be able to analyze both!");
    assert(ResultSwapped.getValue() != Result.getValue() &&
           "monotonicity should flip as we flip the predicate");
  }
#endif

  return Result;
}

Optional<ScalarEvolution::MonotonicPredicateType>
ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred) {
  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other
  // way around for decreasing predicates), all we care about is that *if*
  // the predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  // Only handle LE/LT/GE/GT predicates.
  if (!ICmpInst::isRelational(Pred))
    return None;

  bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
  assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
         "Should be greater or less!");

  // Check that AR does not wrap.
  if (ICmpInst::isUnsigned(Pred)) {
    if (!LHS->hasNoUnsignedWrap())
      return None;
    return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
  } else {
    assert(ICmpInst::isSigned(Pred) &&
           "Relational predicate is either signed or unsigned!");
    if (!LHS->hasNoSignedWrap())
      return None;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step))
      return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    if (isKnownNonPositive(Step))
      return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;

    return None;
  }
}

Optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS,
                                           const Loop *L) {

  // If there is a loop-invariant operand, force it into the RHS; otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return None;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return None;

  auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
  if (!MonotonicType)
    return None;
  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false
  // to true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both of the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.
  bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return None;

  return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(),
                                                 RHS);
}

Optional<ScalarEvolution::LoopInvariantPredicate>
ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    const Instruction *Context, const SCEV *MaxIter) {
  // Try to prove the following set of facts:
  // - The predicate is monotonic in the iteration space.
  // - If the check does not fail on the 1st iteration:
  //   - No overflow will happen during first MaxIter iterations;
  //   - It will not fail on the MaxIter'th iteration.
  // If the check does fail on the 1st iteration, we leave the loop and no
  // other checks matter.

  // If there is a loop-invariant operand, force it into the RHS; otherwise
  // bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return None;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AR || AR->getLoop() != L)
    return None;

  // The predicate must be relational (i.e. <, <=, >=, >).
  if (!ICmpInst::isRelational(Pred))
    return None;

  // TODO: Support steps other than +/- 1.
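  // As an illustrative (hypothetical) example of the overall proof: for an
  // exit check {0,+,1} s< len with MaxIter = n, the code below shows that
  // the IV value on the last iteration still passes the check (n s< len,
  // via the backedge guard) and that the IV cannot wrap on the way there
  // (0 s<= n), so the first-iteration check 0 s< len is loop invariant.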
  const SCEV *Step = AR->getStepRecurrence(*this);
  auto *One = getOne(Step->getType());
  auto *MinusOne = getNegativeSCEV(One);
  if (Step != One && Step != MinusOne)
    return None;

  // A type mismatch here means that MaxIter is potentially larger than the
  // max unsigned value of the start type, which means we cannot prove
  // no-wrap for the indvar.
  if (AR->getType() != MaxIter->getType())
    return None;

  // Value of the IV on the suggested last iteration.
  const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
  // Does it still meet the requirement?
  if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
    return None;
  // Because the step is +/- 1 and MaxIter has the same type as Start (i.e.
  // it does not exceed the max unsigned value of this type), this
  // effectively proves that there is no wrap during the iteration. To prove
  // that there is no signed/unsigned wrap, we need to check that
  // Start <= Last for step = 1 or Start >= Last for step = -1.
  ICmpInst::Predicate NoOverflowPred =
      CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  if (Step == MinusOne)
    NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
  const SCEV *Start = AR->getStart();
  if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
    return None;

  // Everything is fine.
  return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
}

bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges = [&](const ConstantRange &RangeLHS,
                         const ConstantRange &RangeRHS) {
    return RangeLHS.icmp(Pred, RangeRHS);
  };

  // The check at the top of the function catches the case where the values
  // are known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}

bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
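  // For example (hypothetical SCEVs), matching (%x + 42)<nsw> against
  // X = %x with ExpectedFlags = FlagNSW succeeds with OutY = 42; the proofs
  // below then reduce each predicate to a sign test on such a constant
  // offset.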
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
        const SCEV *NonConstOp, *ConstOp;
        SCEV::NoWrapFlags FlagsPresent;

        if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
            !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
          return false;

        OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
        return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
      };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    // X u<= (X + C)<nuw> for any C
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW))
      return true;
    break;

  case ICmpInst::ICMP_UGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULT:
    // X u< (X + C)<nuw> if C != 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) &&
        !C.isNullValue())
      return true;
    break;
  }

  return false;
}

bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing an arbitrary number of activations of
  // isKnownPredicateViaSplitting on the stack can result in exponential time
  // complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate. isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of
  // the interesting cases seen in practice. We can consider "upgrading"
  // L >= 0 to use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}

bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
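  // When present, a guard looks like the following (illustrative IR):
  //   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  // and %cond may be assumed true on every path that continues past the
  // call.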
10097 if (!HasGuards) 10098 return false; 10099 10100 return any_of(*BB, [&](const Instruction &I) { 10101 using namespace llvm::PatternMatch; 10102 10103 Value *Condition; 10104 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>( 10105 m_Value(Condition))) && 10106 isImpliedCond(Pred, LHS, RHS, Condition, false); 10107 }); 10108 } 10109 10110 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 10111 /// protected by a conditional between LHS and RHS. This is used to 10112 /// to eliminate casts. 10113 bool 10114 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 10115 ICmpInst::Predicate Pred, 10116 const SCEV *LHS, const SCEV *RHS) { 10117 // Interpret a null as meaning no loop, where there is obviously no guard 10118 // (interprocedural conditions notwithstanding). 10119 if (!L) return true; 10120 10121 if (VerifyIR) 10122 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) && 10123 "This cannot be done on broken IR!"); 10124 10125 10126 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS)) 10127 return true; 10128 10129 BasicBlock *Latch = L->getLoopLatch(); 10130 if (!Latch) 10131 return false; 10132 10133 BranchInst *LoopContinuePredicate = 10134 dyn_cast<BranchInst>(Latch->getTerminator()); 10135 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && 10136 isImpliedCond(Pred, LHS, RHS, 10137 LoopContinuePredicate->getCondition(), 10138 LoopContinuePredicate->getSuccessor(0) != L->getHeader())) 10139 return true; 10140 10141 // We don't want more than one activation of the following loops on the stack 10142 // -- that can lead to O(n!) time complexity. 10143 if (WalkingBEDominatingConds) 10144 return false; 10145 10146 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true); 10147 10148 // See if we can exploit a trip count to prove the predicate. 10149 const auto &BETakenInfo = getBackedgeTakenInfo(L); 10150 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this); 10151 if (LatchBECount != getCouldNotCompute()) { 10152 // We know that Latch branches back to the loop header exactly 10153 // LatchBECount times. This means the backdege condition at Latch is 10154 // equivalent to "{0,+,1} u< LatchBECount". 10155 Type *Ty = LatchBECount->getType(); 10156 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW); 10157 const SCEV *LoopCounter = 10158 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags); 10159 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter, 10160 LatchBECount)) 10161 return true; 10162 } 10163 10164 // Check conditions due to any @llvm.assume intrinsics. 10165 for (auto &AssumeVH : AC.assumptions()) { 10166 if (!AssumeVH) 10167 continue; 10168 auto *CI = cast<CallInst>(AssumeVH); 10169 if (!DT.dominates(CI, Latch->getTerminator())) 10170 continue; 10171 10172 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) 10173 return true; 10174 } 10175 10176 // If the loop is not reachable from the entry block, we risk running into an 10177 // infinite loop as we walk up into the dom tree. These loops do not matter 10178 // anyway, so we just return a conservative answer when we see them. 
10179 if (!DT.isReachableFromEntry(L->getHeader())) 10180 return false; 10181 10182 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10183 return true; 10184 10185 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10186 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10187 assert(DTN && "should reach the loop header before reaching the root!"); 10188 10189 BasicBlock *BB = DTN->getBlock(); 10190 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10191 return true; 10192 10193 BasicBlock *PBB = BB->getSinglePredecessor(); 10194 if (!PBB) 10195 continue; 10196 10197 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10198 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10199 continue; 10200 10201 Value *Condition = ContinuePredicate->getCondition(); 10202 10203 // If we have an edge `E` within the loop body that dominates the only 10204 // latch, the condition guarding `E` also guards the backedge. This 10205 // reasoning works only for loops with a single latch. 10206 10207 BasicBlockEdge DominatingEdge(PBB, BB); 10208 if (DominatingEdge.isSingleEdge()) { 10209 // We're constructively (and conservatively) enumerating edges within the 10210 // loop body that dominate the latch. The dominator tree better agree 10211 // with us on this: 10212 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10213 10214 if (isImpliedCond(Pred, LHS, RHS, Condition, 10215 BB != ContinuePredicate->getSuccessor(0))) 10216 return true; 10217 } 10218 } 10219 10220 return false; 10221 } 10222 10223 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10224 ICmpInst::Predicate Pred, 10225 const SCEV *LHS, 10226 const SCEV *RHS) { 10227 if (VerifyIR) 10228 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10229 "This cannot be done on broken IR!"); 10230 10231 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10232 // the facts (a >= b && a != b) separately. A typical situation is when the 10233 // non-strict comparison is known from ranges and non-equality is known from 10234 // dominating predicates. If we are proving strict comparison, we always try 10235 // to prove non-equality and non-strict comparison separately. 10236 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10237 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10238 bool ProvedNonStrictComparison = false; 10239 bool ProvedNonEquality = false; 10240 10241 auto SplitAndProve = 10242 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10243 if (!ProvedNonStrictComparison) 10244 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10245 if (!ProvedNonEquality) 10246 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10247 if (ProvedNonStrictComparison && ProvedNonEquality) 10248 return true; 10249 return false; 10250 }; 10251 10252 if (ProvingStrictComparison) { 10253 auto ProofFn = [&](ICmpInst::Predicate P) { 10254 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10255 }; 10256 if (SplitAndProve(ProofFn)) 10257 return true; 10258 } 10259 10260 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
  auto ProveViaGuard = [&](const BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      auto ProofFn = [&](ICmpInst::Predicate P) {
        return isImpliedViaGuard(Block, P, LHS, RHS);
      };
      if (SplitAndProve(ProofFn))
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
    const Instruction *Context = &BB->front();
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
      return true;
    if (ProvingStrictComparison) {
      auto ProofFn = [&](ICmpInst::Predicate P) {
        return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
      };
      if (SplitAndProve(ProofFn))
        return true;
    }
    return false;
  };

  // Starting at the block's predecessor, climb up the predecessor chain, as
  // long as we can find predecessors that have unique successors leading to
  // the original block.
  const Loop *ContainingLoop = LI.getLoopFor(BB);
  const BasicBlock *PredBB;
  if (ContainingLoop && ContainingLoop->getHeader() == BB)
    PredBB = ContainingLoop->getLoopPredecessor();
  else
    PredBB = BB->getSinglePredecessor();
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    if (ProveViaGuard(Pair.first))
      return true;

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, BB))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}

bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                               ICmpInst::Predicate Pred,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L)
    return false;

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    const Value *FoundCondValue, bool Inverse,
                                    const Instruction *Context) {
  // A false condition implies anything.  Do not bother analyzing it further.
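  // (For example, with Inverse == false a literal `false` condition can never
  // hold on this path, so the implication is vacuously true; the check below
  // recognizes exactly this case.)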
  if (FoundCondValue ==
      ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
    return true;

  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  const Value *Op0, *Op1;
  if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
    if (!Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
  } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
    if (Inverse)
      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
             isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
  }

  const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS, const SCEV *FoundRHS,
                                    const Instruction *Context) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    // For unsigned and equality predicates, try to prove that both found
    // operands fit into a narrow unsigned range.  If so, try to prove facts in
    // narrow types.
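    // (Sketch: with i8 LHS/RHS and i32 FoundLHS/FoundRHS, if both found
    // operands are known to lie in [0, 255], the found fact can be truncated
    // to i8 losslessly; the range checks below establish exactly that.)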
    if (!CmpInst::isSigned(FoundPred)) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
          isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, Context))
          return true;
      }
    }

    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *Context) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType()) &&
         "Types should be balanced!");
  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    // We can write the implication
    // 0.  LHS Pred      RHS  <-  FoundLHS SwapPred  FoundRHS
    // using one of the following ways:
    // 1.  LHS Pred      RHS  <-  FoundRHS Pred      FoundLHS
    // 2.  RHS SwapPred  LHS  <-  FoundLHS SwapPred  FoundRHS
    // 3.  LHS Pred      RHS  <-  ~FoundLHS Pred     ~FoundRHS
    // 4.  ~LHS SwapPred ~RHS <-  FoundLHS SwapPred  FoundRHS
    // Forms 1. and 2. require swapping the operands of one condition.  Don't
    // do this if it would break canonical constant/addrec ordering.
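    // (For instance, proving `x <s y` from a found `y >s x` instantiates
    // form 1.: swapping the found operands yields `x <s y` directly.)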
    if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
      return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
                                   Context);
    if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);

    // There's no clear preference between forms 3. and 4., try both.
    return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
                                 FoundLHS, FoundRHS, Context) ||
           isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
                                 getNotSCEV(FoundRHS), Context);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V.  If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin.  If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min).  We know from the guarding condition that !(V
        // == Min).  This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //    => V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
                                  Context))
          return true;
        break;

      // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
      // `RHS > LHS` and `RHS >= LHS` respectively.
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_ULE:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(SharperMin), Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_ULT:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(Min), Context))
          return true;
        break;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is stronger than needed: equality
  // implies any non-strict comparison, and any strict comparison implies
  // non-equality.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
                                Context))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}

Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
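  // (e.g. More = (X + 10) and Less = (X + 4) yields the constant 6; this
  // relies on C1/C2 having been extracted by splitBinaryAdd above.)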
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}

bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
  // Try to recognize the following pattern:
  //
  //   FoundRHS = ...
  //   ...
  //   loop:
  //     FoundLHS = {Start,+,W}
  //   context_bb: // Basic block from the same loop
  //     known(Pred, FoundLHS, FoundRHS)
  //
  // If some predicate is known in the context of a loop, it is also known on
  // each iteration of this loop, including the first iteration.  Therefore, in
  // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`.  Try to
  // prove the original predicate using this fact.
  if (!Context)
    return false;
  const BasicBlock *ContextBB = Context->getParent();
  // Make sure AR varies in the context block.
  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
  }

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that context belongs to the loop and executes on 1st iteration
    // (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
  }

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  // FoundLHS u< FoundRHS u< -C  =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  // FoundLHS s< FoundRHS s< INT_MIN - C  =>  (FoundLHS + C) s< (FoundRHS + C)
  //                                                                    ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)
  // [**]
  //
  // Then
  //
  //   FoundLHS s< FoundRHS s< INT_MIN - C
  // <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  // <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  // <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //      (FoundRHS + INT_MIN + C + INT_MIN)                   [ using (3) ]
  // <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}

bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
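  // (Swapping sides below also requires swapping the predicate, e.g. proving
  // `a <s b` with the Phi on the right becomes proving `b >s a`.)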
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each predecessor
    // block the predicate is true for the incoming values from that block,
    // then the predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec.  It means that there is a loop which has both AddRec and Unknown
    // PHIs; for it we can compare the incoming values of the AddRec from above
    // the loop and the latch with the respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over the inputs of LHS and compare each of them to
    // RHS: the predicate is true for (LHS, RHS) if it is true for all such
    // pairs.  At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      // Make sure L does not refer to a value from a potentially previous
      // iteration of a loop.
      if (!properlyDominates(L, IncBB))
        return false;
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *Context) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          Context))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS);
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
      SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}

/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}

bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparison so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace the unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS.  Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Check if the SGT predicate can be proved trivially or using the found
  // context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV.  Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this.  So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
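    // (The nsw requirement below is what makes the sum rules sound: without
    // it, e.g. LL = INT_MAX and LR = 1 would wrap to INT_MIN and the
    // conclusion LHS > RHS would fail.)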
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rules:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions.  In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop, which would
      // be cached as SCEVCouldNotCompute to avoid the infinite recursion.  To
      // avoid this, we only want to create SCEVs that are constants in this
      // section.  So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator.  If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not.  We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3.  If we
      // divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt.  If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return LHSRange.icmp(Pred, ConstRHS);
}

bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                        bool IsSigned) {

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
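    // (Worked example, hypothetical i8 values: MinRHS = -100 and Stride in
    // [1, 30] give MaxStrideMinusOne = 29; since -128 + 29 = -99 >s -100, the
    // IV may still be at -99, step past INT_MIN, and wrap.)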
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}

const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta,
                                            const SCEV *Step) {
  const SCEV *One = getOne(Step->getType());
  Delta = getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one.  In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition.  This is safe
  // because in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */);

  return MaxBECount;
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects.  Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single-trip loop.  The backedge taken count formula reduces to zero in
    // this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero;
    // otherwise we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping.  The computed backedge taken count may be
    // wrong in such cases.  This is prevented by checking that the stride is
    // not known to be either positive or non-positive.  For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for (i = 127; i < 128; i += 129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopIsFiniteByAssumption(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() && !NoWrap) {
    auto isUBOnWrap = [&]() {
      // Can we prove this loop *must* be UB if overflow of IV occurs?
      // Reasoning goes as follows:
      // * Suppose the IV did self wrap.
      // * If Stride evenly divides the iteration space, then once wrap
      //   occurs, the loop must revisit the same values.
      // * We know that RHS is invariant, and that none of those values
      //   caused this exit to be taken previously.  Thus, this exit is
      //   dynamically dead.
      // * If this is the sole exit, then a dead exit implies the loop
      //   must be infinite if there are no abnormal exits.
      // * If the loop were infinite, then it must either not be mustprogress
      //   or have side effects.  Otherwise, it must be UB.
      // * It can't (by assumption) be UB, so we have contradicted our
      //   premise and can conclude the IV did not in fact self-wrap.
      // From no-self-wrap, we then need to prove no-(un)signed-wrap.  This
      // follows trivially from the fact that every (un)signed-wrapped, but
      // not self-wrapped, value must be less than the last value before the
      // (un)signed wrap.  Since we know that last value didn't cause an exit,
      // neither will any smaller one.
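      // (Illustrative instance of the divides-the-iteration-space point,
      // checked below via the power-of-two test: with an i8 IV and
      // Stride == 2, any wrap revisits the same residue class of values, so
      // the exited-value argument above applies.)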
      if (!isLoopInvariant(RHS, L))
        return false;

      auto *StrideC = dyn_cast<SCEVConstant>(Stride);
      if (!StrideC || !StrideC->getAPInt().isPowerOf2())
        return false;

      if (!ControlsExit || !loopHasNoAbnormalExits(L))
        return false;

      return loopIsFiniteByAssumption(L);
    };

    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow.  Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing optimization in the presence
    // of undefined behavior, as in C.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
      return getCouldNotCompute();
  }

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit.  However, we
  // can calculate the MaxBECount, given the start, stride and max value for
  // the end bound of the loop (RHS), and the fact that IV does not overflow
  // (which is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times, rounded up, where Start is the LHS value of the
  // less-than comparison the first time it is evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above.  If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so
  // the result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    // If we know that RHS >= Start in the context of the loop, then we know
    // that max(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start))
      End = RHS;
    else
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow.  Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing optimization in the presence of
  // undefined behavior, as in C.
  if (!Stride->isOne() && !NoWrap)
    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
      return getCouldNotCompute();

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of the loop, then we know
    // that min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS.  This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride));

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened.

    // Ensure that the previous value is in the range.  This is a sanity check.
11701 assert(Range.contains( 11702 EvaluateConstantChrecAtConstant(this, 11703 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) && 11704 "Linear scev computation is off in a bad way!"); 11705 return SE.getConstant(ExitValue); 11706 } 11707 11708 if (isQuadratic()) { 11709 if (auto S = SolveQuadraticAddRecRange(this, Range, SE)) 11710 return SE.getConstant(S.getValue()); 11711 } 11712 11713 return SE.getCouldNotCompute(); 11714 } 11715 11716 const SCEVAddRecExpr * 11717 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const { 11718 assert(getNumOperands() > 1 && "AddRec with zero step?"); 11719 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)), 11720 // but in this case we cannot guarantee that the value returned will be an 11721 // AddRec because SCEV does not have a fixed point where it stops 11722 // simplification: it is legal to return ({rec1} + {rec2}). For example, it 11723 // may happen if we reach the arithmetic depth limit while simplifying. So we 11724 // construct the returned value explicitly. 11725 SmallVector<const SCEV *, 3> Ops; 11726 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and 11727 // (this + Step) is {A+B,+,B+C,+,...,+,N}. 11728 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i) 11729 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1))); 11730 // We know that the last operand is not a constant zero (otherwise it would 11731 // have been popped out earlier). This guarantees that if the result has 11732 // the same last operand, then it will also not be popped out, meaning that 11733 // the returned value will be an AddRec. 11734 const SCEV *Last = getOperand(getNumOperands() - 1); 11735 assert(!Last->isZero() && "Recurrence with zero step?"); 11736 Ops.push_back(Last); 11737 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(), 11738 SCEV::FlagAnyWrap)); 11739 } 11740 11741 // Return true when S contains at least one undef value. 11742 static inline bool containsUndefs(const SCEV *S) { 11743 return SCEVExprContains(S, [](const SCEV *S) { 11744 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 11745 return isa<UndefValue>(SU->getValue()); 11746 return false; 11747 }); 11748 } 11749 11750 namespace { 11751 11752 // Collect the steps of all AddRec expressions. 11753 struct SCEVCollectStrides { 11754 ScalarEvolution &SE; 11755 SmallVectorImpl<const SCEV *> &Strides; 11756 11757 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S) 11758 : SE(SE), Strides(S) {} 11759 11760 bool follow(const SCEV *S) { 11761 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 11762 Strides.push_back(AR->getStepRecurrence(SE)); 11763 return true; 11764 } 11765 11766 bool isDone() const { return false; } 11767 }; 11768 11769 // Collect all SCEVUnknown, SCEVMulExpr, and SCEVSignExtendExpr expressions. 11770 struct SCEVCollectTerms { 11771 SmallVectorImpl<const SCEV *> &Terms; 11772 11773 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {} 11774 11775 bool follow(const SCEV *S) { 11776 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) || 11777 isa<SCEVSignExtendExpr>(S)) { 11778 if (!containsUndefs(S)) 11779 Terms.push_back(S); 11780 11781 // Stop recursion: once we collected a term, do not walk its operands. 11782 return false; 11783 } 11784 11785 // Keep looking. 11786 return true; 11787 } 11788 11789 bool isDone() const { return false; } 11790 }; 11791 11792 // Check if a SCEV contains an AddRecExpr.
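// The result is reported through the bool reference passed to the constructor. // A minimal usage sketch (illustrative names; the collector below drives it exactly this way): // bool Found; // SCEVHasAddRec Checker(Found); // visitAll(S, Checker); // Found is now true iff S contains an AddRecExpr.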
11793 struct SCEVHasAddRec { 11794 bool &ContainsAddRec; 11795 11796 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) { 11797 ContainsAddRec = false; 11798 } 11799 11800 bool follow(const SCEV *S) { 11801 if (isa<SCEVAddRecExpr>(S)) { 11802 ContainsAddRec = true; 11803 11804 // Stop recursion: we found an AddRec, no need to walk its operands. 11805 return false; 11806 } 11807 11808 // Keep looking. 11809 return true; 11810 } 11811 11812 bool isDone() const { return false; } 11813 }; 11814 11815 // Find factors that are multiplied with an expression that (possibly as a 11816 // subexpression) contains an AddRecExpr. In the expression: 11817 // 11818 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop)) 11819 // 11820 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)" 11821 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size 11822 // parameters as they form a product with an induction variable. 11823 // 11824 // This collector expects all array size parameters to be in the same MulExpr. 11825 // It might be necessary to later add support for collecting parameters that are 11826 // spread over different nested MulExprs. 11827 struct SCEVCollectAddRecMultiplies { 11828 SmallVectorImpl<const SCEV *> &Terms; 11829 ScalarEvolution &SE; 11830 11831 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE) 11832 : Terms(T), SE(SE) {} 11833 11834 bool follow(const SCEV *S) { 11835 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) { 11836 bool HasAddRec = false; 11837 SmallVector<const SCEV *, 0> Operands; 11838 for (auto Op : Mul->operands()) { 11839 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op); 11840 if (Unknown && !isa<CallInst>(Unknown->getValue())) { 11841 Operands.push_back(Op); 11842 } else if (Unknown) { 11843 HasAddRec = true; 11844 } else { 11845 bool ContainsAddRec = false; 11846 SCEVHasAddRec HasAddRecVisitor(ContainsAddRec); 11847 visitAll(Op, HasAddRecVisitor); 11848 HasAddRec |= ContainsAddRec; 11849 } 11850 } 11851 if (Operands.empty()) 11852 return true; 11853 11854 if (!HasAddRec) 11855 return false; 11856 11857 Terms.push_back(SE.getMulExpr(Operands)); 11858 // Stop recursion: once we collected a term, do not walk its operands. 11859 return false; 11860 } 11861 11862 // Keep looking. 11863 return true; 11864 } 11865 11866 bool isDone() const { return false; } 11867 }; 11868 11869 } // end anonymous namespace 11870 11871 /// Find parametric terms in this SCEVAddRecExpr. We first look for parameters 11872 /// in two places: 11873 /// 1) The strides of AddRec expressions. 11874 /// 2) Unknowns that are multiplied with AddRec expressions.
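/// For example, given the access SCEV /// {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> from the /// delinearize example below, the strides (8 * %m * %o), (8 * %o) and 8 are /// collected first, and the term collector then gathers the multiplicative /// terms found in those strides.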
11875 void ScalarEvolution::collectParametricTerms(const SCEV *Expr, 11876 SmallVectorImpl<const SCEV *> &Terms) { 11877 SmallVector<const SCEV *, 4> Strides; 11878 SCEVCollectStrides StrideCollector(*this, Strides); 11879 visitAll(Expr, StrideCollector); 11880 11881 LLVM_DEBUG({ 11882 dbgs() << "Strides:\n"; 11883 for (const SCEV *S : Strides) 11884 dbgs() << *S << "\n"; 11885 }); 11886 11887 for (const SCEV *S : Strides) { 11888 SCEVCollectTerms TermCollector(Terms); 11889 visitAll(S, TermCollector); 11890 } 11891 11892 LLVM_DEBUG({ 11893 dbgs() << "Terms:\n"; 11894 for (const SCEV *T : Terms) 11895 dbgs() << *T << "\n"; 11896 }); 11897 11898 SCEVCollectAddRecMultiplies MulCollector(Terms, *this); 11899 visitAll(Expr, MulCollector); 11900 } 11901 11902 static bool findArrayDimensionsRec(ScalarEvolution &SE, 11903 SmallVectorImpl<const SCEV *> &Terms, 11904 SmallVectorImpl<const SCEV *> &Sizes) { 11905 int Last = Terms.size() - 1; 11906 const SCEV *Step = Terms[Last]; 11907 11908 // End of recursion. 11909 if (Last == 0) { 11910 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) { 11911 SmallVector<const SCEV *, 2> Qs; 11912 for (const SCEV *Op : M->operands()) 11913 if (!isa<SCEVConstant>(Op)) 11914 Qs.push_back(Op); 11915 11916 Step = SE.getMulExpr(Qs); 11917 } 11918 11919 Sizes.push_back(Step); 11920 return true; 11921 } 11922 11923 for (const SCEV *&Term : Terms) { 11924 // Normalize the terms before the next call to findArrayDimensionsRec. 11925 const SCEV *Q, *R; 11926 SCEVDivision::divide(SE, Term, Step, &Q, &R); 11927 11928 // Bail out when GCD does not evenly divide one of the terms. 11929 if (!R->isZero()) 11930 return false; 11931 11932 Term = Q; 11933 } 11934 11935 // Remove all SCEVConstants. 11936 erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }); 11937 11938 if (Terms.size() > 0) 11939 if (!findArrayDimensionsRec(SE, Terms, Sizes)) 11940 return false; 11941 11942 Sizes.push_back(Step); 11943 return true; 11944 } 11945 11946 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter. 11947 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) { 11948 for (const SCEV *T : Terms) 11949 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); })) 11950 return true; 11951 11952 return false; 11953 } 11954 11955 // Return the number of product terms in S. 11956 static inline int numberOfTerms(const SCEV *S) { 11957 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S)) 11958 return Expr->getNumOperands(); 11959 return 1; 11960 } 11961 11962 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) { 11963 if (isa<SCEVConstant>(T)) 11964 return nullptr; 11965 11966 if (isa<SCEVUnknown>(T)) 11967 return T; 11968 11969 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) { 11970 SmallVector<const SCEV *, 2> Factors; 11971 for (const SCEV *Op : M->operands()) 11972 if (!isa<SCEVConstant>(Op)) 11973 Factors.push_back(Op); 11974 11975 return SE.getMulExpr(Factors); 11976 } 11977 11978 return T; 11979 } 11980 11981 /// Return the size of an element read or written by Inst. 
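/// For a store this is the size of the stored value's type; for a load it is /// the size of the loaded type. Any other instruction yields a null result.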
11982 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 11983 Type *Ty; 11984 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 11985 Ty = Store->getValueOperand()->getType(); 11986 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 11987 Ty = Load->getType(); 11988 else 11989 return nullptr; 11990 11991 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 11992 return getSizeOfExpr(ETy, Ty); 11993 } 11994 11995 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms, 11996 SmallVectorImpl<const SCEV *> &Sizes, 11997 const SCEV *ElementSize) { 11998 if (Terms.empty() || !ElementSize) 11999 return; 12000 12001 // Early return when Terms do not contain parameters: we do not delinearize 12002 // non-parametric SCEVs. 12003 if (!containsParameters(Terms)) 12004 return; 12005 12006 LLVM_DEBUG({ 12007 dbgs() << "Terms:\n"; 12008 for (const SCEV *T : Terms) 12009 dbgs() << *T << "\n"; 12010 }); 12011 12012 // Remove duplicates. 12013 array_pod_sort(Terms.begin(), Terms.end()); 12014 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end()); 12015 12016 // Put larger terms first. 12017 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) { 12018 return numberOfTerms(LHS) > numberOfTerms(RHS); 12019 }); 12020 12021 // Try to divide all terms by the element size. If a term is not divisible by 12022 // the element size, keep the original term. 12023 for (const SCEV *&Term : Terms) { 12024 const SCEV *Q, *R; 12025 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R); 12026 if (!Q->isZero()) 12027 Term = Q; 12028 } 12029 12030 SmallVector<const SCEV *, 4> NewTerms; 12031 12032 // Remove constant factors. 12033 for (const SCEV *T : Terms) 12034 if (const SCEV *NewT = removeConstantFactors(*this, T)) 12035 NewTerms.push_back(NewT); 12036 12037 LLVM_DEBUG({ 12038 dbgs() << "Terms after sorting:\n"; 12039 for (const SCEV *T : NewTerms) 12040 dbgs() << *T << "\n"; 12041 }); 12042 12043 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) { 12044 Sizes.clear(); 12045 return; 12046 } 12047 12048 // The last element to be pushed into Sizes is the size of an element. 12049 Sizes.push_back(ElementSize); 12050 12051 LLVM_DEBUG({ 12052 dbgs() << "Sizes:\n"; 12053 for (const SCEV *S : Sizes) 12054 dbgs() << *S << "\n"; 12055 }); 12056 } 12057 12058 void ScalarEvolution::computeAccessFunctions( 12059 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts, 12060 SmallVectorImpl<const SCEV *> &Sizes) { 12061 // Early exit in case this SCEV is not an affine multivariate function. 12062 if (Sizes.empty()) 12063 return; 12064 12065 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr)) 12066 if (!AR->isAffine()) 12067 return; 12068 12069 const SCEV *Res = Expr; 12070 int Last = Sizes.size() - 1; 12071 for (int i = Last; i >= 0; i--) { 12072 const SCEV *Q, *R; 12073 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R); 12074 12075 LLVM_DEBUG({ 12076 dbgs() << "Res: " << *Res << "\n"; 12077 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n"; 12078 dbgs() << "Res divided by Sizes[i]:\n"; 12079 dbgs() << "Quotient: " << *Q << "\n"; 12080 dbgs() << "Remainder: " << *R << "\n"; 12081 }); 12082 12083 Res = Q; 12084 12085 // Do not record the last subscript, which corresponds to the size of the 12086 // elements in the array. 12087 if (i == Last) { 12088 12089 // Bail out if the remainder is too complex.
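// A remainder that is itself an AddRec would mean that the byte offset within // a single element still varies with some loop, so the computed sizes cannot // describe this access; drop all results.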
12090 if (isa<SCEVAddRecExpr>(R)) { 12091 Subscripts.clear(); 12092 Sizes.clear(); 12093 return; 12094 } 12095 12096 continue; 12097 } 12098 12099 // Record the access function for the current subscript. 12100 Subscripts.push_back(R); 12101 } 12102 12103 // Also push in the last position the remainder of the last division: it will 12104 // be the access function of the innermost dimension. 12105 Subscripts.push_back(Res); 12106 12107 std::reverse(Subscripts.begin(), Subscripts.end()); 12108 12109 LLVM_DEBUG({ 12110 dbgs() << "Subscripts:\n"; 12111 for (const SCEV *S : Subscripts) 12112 dbgs() << *S << "\n"; 12113 }); 12114 } 12115 12116 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and 12117 /// sizes of an array access. Returns the remainder of the delinearization, 12118 /// which is the starting offset of the array. The SCEV->delinearize algorithm 12119 /// computes the multiples of SCEV coefficients: that is, a pattern match of 12120 /// subexpressions in the stride and base of a SCEV, corresponding to the 12121 /// computation of a GCD (greatest common divisor) of base and stride. When 12122 /// SCEV->delinearize fails, it returns the SCEV unchanged. 12123 /// 12124 /// For example: when analyzing the memory access A[i][j][k] in this loop nest 12125 /// 12126 /// void foo(long n, long m, long o, double A[n][m][o]) { 12127 /// 12128 /// for (long i = 0; i < n; i++) 12129 /// for (long j = 0; j < m; j++) 12130 /// for (long k = 0; k < o; k++) 12131 /// A[i][j][k] = 1.0; 12132 /// } 12133 /// 12134 /// the delinearization input is the following AddRec SCEV: 12135 /// 12136 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k> 12137 /// 12138 /// From this SCEV, we are able to say that the base offset of the access is %A 12139 /// because it appears as an offset that does not divide any of the strides in 12140 /// the loops: 12141 /// 12142 /// CHECK: Base offset: %A 12143 /// 12144 /// and then SCEV->delinearize determines the sizes of some of the dimensions 12145 /// of the array, as these are the factors relating consecutive strides: 12146 /// 12147 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes. 12148 /// 12149 /// Note that the outermost dimension remains of UnknownSize because there are 12150 /// no strides that would help identify the size of the last dimension: when 12151 /// the array has been statically allocated, one could compute the size of that 12152 /// dimension by dividing the overall size of the array by the size of the known 12153 /// dimensions: %m * %o * 8. 12154 /// 12155 /// Finally, delinearize provides the access functions for the array reference 12156 /// that corresponds to A[i][j][k] in the above C testcase: 12157 /// 12158 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>] 12159 /// 12160 /// The testcases check the output of a function pass, DelinearizationPass, 12161 /// which walks through all loads and stores of a function, asks for the SCEV of 12162 /// each memory access with respect to all enclosing loops, calls 12163 /// SCEV->delinearize on it, and prints the results. 12164 void ScalarEvolution::delinearize(const SCEV *Expr, 12165 SmallVectorImpl<const SCEV *> &Subscripts, 12166 SmallVectorImpl<const SCEV *> &Sizes, 12167 const SCEV *ElementSize) { 12168 // First step: collect parametric terms.
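// (These are the strides of the AddRecs appearing in Expr, plus the unknowns // multiplied with AddRecs; see collectParametricTerms above.)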
12169 SmallVector<const SCEV *, 4> Terms; 12170 collectParametricTerms(Expr, Terms); 12171 12172 if (Terms.empty()) 12173 return; 12174 12175 // Second step: find subscript sizes. 12176 findArrayDimensions(Terms, Sizes, ElementSize); 12177 12178 if (Sizes.empty()) 12179 return; 12180 12181 // Third step: compute the access functions for each subscript. 12182 computeAccessFunctions(Expr, Subscripts, Sizes); 12183 12184 if (Subscripts.empty()) 12185 return; 12186 12187 LLVM_DEBUG({ 12188 dbgs() << "succeeded to delinearize " << *Expr << "\n"; 12189 dbgs() << "ArrayDecl[UnknownSize]"; 12190 for (const SCEV *S : Sizes) 12191 dbgs() << "[" << *S << "]"; 12192 12193 dbgs() << "\nArrayRef"; 12194 for (const SCEV *S : Subscripts) 12195 dbgs() << "[" << *S << "]"; 12196 dbgs() << "\n"; 12197 }); 12198 } 12199 12200 bool ScalarEvolution::getIndexExpressionsFromGEP( 12201 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts, 12202 SmallVectorImpl<int> &Sizes) { 12203 assert(Subscripts.empty() && Sizes.empty() && 12204 "Expected output lists to be empty on entry to this function."); 12205 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP"); 12206 Type *Ty = GEP->getPointerOperandType(); 12207 bool DroppedFirstDim = false; 12208 for (unsigned i = 1; i < GEP->getNumOperands(); i++) { 12209 const SCEV *Expr = getSCEV(GEP->getOperand(i)); 12210 if (i == 1) { 12211 if (auto *PtrTy = dyn_cast<PointerType>(Ty)) { 12212 Ty = PtrTy->getElementType(); 12213 } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) { 12214 Ty = ArrayTy->getElementType(); 12215 } else { 12216 Subscripts.clear(); 12217 Sizes.clear(); 12218 return false; 12219 } 12220 if (auto *Const = dyn_cast<SCEVConstant>(Expr)) 12221 if (Const->getValue()->isZero()) { 12222 DroppedFirstDim = true; 12223 continue; 12224 } 12225 Subscripts.push_back(Expr); 12226 continue; 12227 } 12228 12229 auto *ArrayTy = dyn_cast<ArrayType>(Ty); 12230 if (!ArrayTy) { 12231 Subscripts.clear(); 12232 Sizes.clear(); 12233 return false; 12234 } 12235 12236 Subscripts.push_back(Expr); 12237 if (!(DroppedFirstDim && i == 2)) 12238 Sizes.push_back(ArrayTy->getNumElements()); 12239 12240 Ty = ArrayTy->getElementType(); 12241 } 12242 return !Subscripts.empty(); 12243 } 12244 12245 //===----------------------------------------------------------------------===// 12246 // SCEVCallbackVH Class Implementation 12247 //===----------------------------------------------------------------------===// 12248 12249 void ScalarEvolution::SCEVCallbackVH::deleted() { 12250 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12251 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 12252 SE->ConstantEvolutionLoopExitValue.erase(PN); 12253 SE->eraseValueFromMap(getValPtr()); 12254 // this now dangles! 12255 } 12256 12257 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 12258 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12259 12260 // Forget all the expressions associated with users of the old value, 12261 // so that future queries will recompute the expressions using the new 12262 // value. 12263 Value *Old = getValPtr(); 12264 SmallVector<User *, 16> Worklist(Old->users()); 12265 SmallPtrSet<User *, 8> Visited; 12266 while (!Worklist.empty()) { 12267 User *U = Worklist.pop_back_val(); 12268 // Deleting the Old value will cause this to dangle. Postpone 12269 // that until everything else is done. 
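// (Erasing Old's map entry would destroy the SCEVCallbackVH we are currently // running in; the "this now dangles!" markers below flag the same hazard.)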
12270 if (U == Old) 12271 continue; 12272 if (!Visited.insert(U).second) 12273 continue; 12274 if (PHINode *PN = dyn_cast<PHINode>(U)) 12275 SE->ConstantEvolutionLoopExitValue.erase(PN); 12276 SE->eraseValueFromMap(U); 12277 llvm::append_range(Worklist, U->users()); 12278 } 12279 // Delete the Old value. 12280 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12281 SE->ConstantEvolutionLoopExitValue.erase(PN); 12282 SE->eraseValueFromMap(Old); 12283 // this now dangles! 12284 } 12285 12286 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12287 : CallbackVH(V), SE(se) {} 12288 12289 //===----------------------------------------------------------------------===// 12290 // ScalarEvolution Class Implementation 12291 //===----------------------------------------------------------------------===// 12292 12293 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12294 AssumptionCache &AC, DominatorTree &DT, 12295 LoopInfo &LI) 12296 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12297 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12298 LoopDispositions(64), BlockDispositions(64) { 12299 // To use guards for proving predicates, we need to scan every instruction in 12300 // relevant basic blocks, and not just terminators. Doing this is a waste of 12301 // time if the IR does not actually contain any calls to 12302 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12303 // 12304 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12305 // to _add_ guards to the module when there weren't any before, and wants 12306 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12307 // efficient in lieu of being smart in that rather obscure case. 
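// The check itself is cheap: look up the declaration of // @llvm.experimental.guard in the module and see whether it has any users.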
12308 12309 auto *GuardDecl = F.getParent()->getFunction( 12310 Intrinsic::getName(Intrinsic::experimental_guard)); 12311 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12312 } 12313 12314 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12315 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12316 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12317 ValueExprMap(std::move(Arg.ValueExprMap)), 12318 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12319 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12320 PendingMerges(std::move(Arg.PendingMerges)), 12321 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12322 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12323 PredicatedBackedgeTakenCounts( 12324 std::move(Arg.PredicatedBackedgeTakenCounts)), 12325 ConstantEvolutionLoopExitValue( 12326 std::move(Arg.ConstantEvolutionLoopExitValue)), 12327 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12328 LoopDispositions(std::move(Arg.LoopDispositions)), 12329 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12330 BlockDispositions(std::move(Arg.BlockDispositions)), 12331 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12332 SignedRanges(std::move(Arg.SignedRanges)), 12333 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12334 UniquePreds(std::move(Arg.UniquePreds)), 12335 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12336 LoopUsers(std::move(Arg.LoopUsers)), 12337 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12338 FirstUnknown(Arg.FirstUnknown) { 12339 Arg.FirstUnknown = nullptr; 12340 } 12341 12342 ScalarEvolution::~ScalarEvolution() { 12343 // Iterate through all the SCEVUnknown instances and call their 12344 // destructors, so that they release their references to their values. 
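// The unknowns form a singly linked list threaded through FirstUnknown; read // the Next pointer before destroying each node, since destruction invalidates it.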
12345 for (SCEVUnknown *U = FirstUnknown; U;) { 12346 SCEVUnknown *Tmp = U; 12347 U = U->Next; 12348 Tmp->~SCEVUnknown(); 12349 } 12350 FirstUnknown = nullptr; 12351 12352 ExprValueMap.clear(); 12353 ValueExprMap.clear(); 12354 HasRecMap.clear(); 12355 BackedgeTakenCounts.clear(); 12356 PredicatedBackedgeTakenCounts.clear(); 12357 12358 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12359 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12360 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12361 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12362 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12363 } 12364 12365 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12366 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12367 } 12368 12369 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12370 const Loop *L) { 12371 // Print all inner loops first 12372 for (Loop *I : *L) 12373 PrintLoopInfo(OS, SE, I); 12374 12375 OS << "Loop "; 12376 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12377 OS << ": "; 12378 12379 SmallVector<BasicBlock *, 8> ExitingBlocks; 12380 L->getExitingBlocks(ExitingBlocks); 12381 if (ExitingBlocks.size() != 1) 12382 OS << "<multiple exits> "; 12383 12384 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12385 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12386 else 12387 OS << "Unpredictable backedge-taken count.\n"; 12388 12389 if (ExitingBlocks.size() > 1) 12390 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12391 OS << " exit count for " << ExitingBlock->getName() << ": " 12392 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12393 } 12394 12395 OS << "Loop "; 12396 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12397 OS << ": "; 12398 12399 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12400 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12401 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12402 OS << ", actual taken count either this or zero."; 12403 } else { 12404 OS << "Unpredictable max backedge-taken count. "; 12405 } 12406 12407 OS << "\n" 12408 "Loop "; 12409 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12410 OS << ": "; 12411 12412 SCEVUnionPredicate Pred; 12413 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12414 if (!isa<SCEVCouldNotCompute>(PBT)) { 12415 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12416 OS << " Predicates:\n"; 12417 Pred.print(OS, 4); 12418 } else { 12419 OS << "Unpredictable predicated backedge-taken count. 
"; 12420 } 12421 OS << "\n"; 12422 12423 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12424 OS << "Loop "; 12425 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12426 OS << ": "; 12427 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12428 } 12429 } 12430 12431 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12432 switch (LD) { 12433 case ScalarEvolution::LoopVariant: 12434 return "Variant"; 12435 case ScalarEvolution::LoopInvariant: 12436 return "Invariant"; 12437 case ScalarEvolution::LoopComputable: 12438 return "Computable"; 12439 } 12440 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12441 } 12442 12443 void ScalarEvolution::print(raw_ostream &OS) const { 12444 // ScalarEvolution's implementation of the print method is to print 12445 // out SCEV values of all instructions that are interesting. Doing 12446 // this potentially causes it to create new SCEV objects though, 12447 // which technically conflicts with the const qualifier. This isn't 12448 // observable from outside the class though, so casting away the 12449 // const isn't dangerous. 12450 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12451 12452 if (ClassifyExpressions) { 12453 OS << "Classifying expressions for: "; 12454 F.printAsOperand(OS, /*PrintType=*/false); 12455 OS << "\n"; 12456 for (Instruction &I : instructions(F)) 12457 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12458 OS << I << '\n'; 12459 OS << " --> "; 12460 const SCEV *SV = SE.getSCEV(&I); 12461 SV->print(OS); 12462 if (!isa<SCEVCouldNotCompute>(SV)) { 12463 OS << " U: "; 12464 SE.getUnsignedRange(SV).print(OS); 12465 OS << " S: "; 12466 SE.getSignedRange(SV).print(OS); 12467 } 12468 12469 const Loop *L = LI.getLoopFor(I.getParent()); 12470 12471 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12472 if (AtUse != SV) { 12473 OS << " --> "; 12474 AtUse->print(OS); 12475 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12476 OS << " U: "; 12477 SE.getUnsignedRange(AtUse).print(OS); 12478 OS << " S: "; 12479 SE.getSignedRange(AtUse).print(OS); 12480 } 12481 } 12482 12483 if (L) { 12484 OS << "\t\t" "Exits: "; 12485 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12486 if (!SE.isLoopInvariant(ExitValue, L)) { 12487 OS << "<<Unknown>>"; 12488 } else { 12489 OS << *ExitValue; 12490 } 12491 12492 bool First = true; 12493 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12494 if (First) { 12495 OS << "\t\t" "LoopDispositions: { "; 12496 First = false; 12497 } else { 12498 OS << ", "; 12499 } 12500 12501 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12502 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12503 } 12504 12505 for (auto *InnerL : depth_first(L)) { 12506 if (InnerL == L) 12507 continue; 12508 if (First) { 12509 OS << "\t\t" "LoopDispositions: { "; 12510 First = false; 12511 } else { 12512 OS << ", "; 12513 } 12514 12515 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12516 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12517 } 12518 12519 OS << " }"; 12520 } 12521 12522 OS << "\n"; 12523 } 12524 } 12525 12526 OS << "Determining loop execution counts for: "; 12527 F.printAsOperand(OS, /*PrintType=*/false); 12528 OS << "\n"; 12529 for (Loop *I : LI) 12530 PrintLoopInfo(OS, &SE, I); 12531 } 12532 12533 ScalarEvolution::LoopDisposition 12534 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12535 auto &Values = LoopDispositions[S]; 12536 for (auto 
&V : Values) { 12537 if (V.getPointer() == L) 12538 return V.getInt(); 12539 } 12540 Values.emplace_back(L, LoopVariant); 12541 LoopDisposition D = computeLoopDisposition(S, L); 12542 auto &Values2 = LoopDispositions[S]; 12543 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12544 if (V.getPointer() == L) { 12545 V.setInt(D); 12546 break; 12547 } 12548 } 12549 return D; 12550 } 12551 12552 ScalarEvolution::LoopDisposition 12553 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12554 switch (S->getSCEVType()) { 12555 case scConstant: 12556 return LoopInvariant; 12557 case scPtrToInt: 12558 case scTruncate: 12559 case scZeroExtend: 12560 case scSignExtend: 12561 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12562 case scAddRecExpr: { 12563 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12564 12565 // If L is the addrec's loop, it's computable. 12566 if (AR->getLoop() == L) 12567 return LoopComputable; 12568 12569 // Add recurrences are never invariant in the function-body (null loop). 12570 if (!L) 12571 return LoopVariant; 12572 12573 // Everything that is not defined at loop entry is variant. 12574 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12575 return LoopVariant; 12576 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12577 " dominate the contained loop's header?"); 12578 12579 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12580 if (AR->getLoop()->contains(L)) 12581 return LoopInvariant; 12582 12583 // This recurrence is variant w.r.t. L if any of its operands 12584 // are variant. 12585 for (auto *Op : AR->operands()) 12586 if (!isLoopInvariant(Op, L)) 12587 return LoopVariant; 12588 12589 // Otherwise it's loop-invariant. 12590 return LoopInvariant; 12591 } 12592 case scAddExpr: 12593 case scMulExpr: 12594 case scUMaxExpr: 12595 case scSMaxExpr: 12596 case scUMinExpr: 12597 case scSMinExpr: { 12598 bool HasVarying = false; 12599 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12600 LoopDisposition D = getLoopDisposition(Op, L); 12601 if (D == LoopVariant) 12602 return LoopVariant; 12603 if (D == LoopComputable) 12604 HasVarying = true; 12605 } 12606 return HasVarying ? LoopComputable : LoopInvariant; 12607 } 12608 case scUDivExpr: { 12609 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12610 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12611 if (LD == LoopVariant) 12612 return LoopVariant; 12613 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12614 if (RD == LoopVariant) 12615 return LoopVariant; 12616 return (LD == LoopInvariant && RD == LoopInvariant) ? 12617 LoopInvariant : LoopComputable; 12618 } 12619 case scUnknown: 12620 // All non-instruction values are loop invariant. All instructions are loop 12621 // invariant if they are not contained in the specified loop. 12622 // Instructions are never considered invariant in the function body 12623 // (null loop) because they are defined within the "loop". 12624 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12625 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12626 return LoopInvariant; 12627 case scCouldNotCompute: 12628 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12629 } 12630 llvm_unreachable("Unknown SCEV kind!"); 12631 } 12632 12633 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12634 return getLoopDisposition(S, L) == LoopInvariant; 12635 } 12636 12637 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12638 return getLoopDisposition(S, L) == LoopComputable; 12639 } 12640 12641 ScalarEvolution::BlockDisposition 12642 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12643 auto &Values = BlockDispositions[S]; 12644 for (auto &V : Values) { 12645 if (V.getPointer() == BB) 12646 return V.getInt(); 12647 } 12648 Values.emplace_back(BB, DoesNotDominateBlock); 12649 BlockDisposition D = computeBlockDisposition(S, BB); 12650 auto &Values2 = BlockDispositions[S]; 12651 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12652 if (V.getPointer() == BB) { 12653 V.setInt(D); 12654 break; 12655 } 12656 } 12657 return D; 12658 } 12659 12660 ScalarEvolution::BlockDisposition 12661 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12662 switch (S->getSCEVType()) { 12663 case scConstant: 12664 return ProperlyDominatesBlock; 12665 case scPtrToInt: 12666 case scTruncate: 12667 case scZeroExtend: 12668 case scSignExtend: 12669 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12670 case scAddRecExpr: { 12671 // This uses a "dominates" query instead of "properly dominates" query 12672 // to test for proper dominance too, because the instruction which 12673 // produces the addrec's value is a PHI, and a PHI effectively properly 12674 // dominates its entire containing block. 12675 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12676 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12677 return DoesNotDominateBlock; 12678 12679 // Fall through into SCEVNAryExpr handling. 12680 LLVM_FALLTHROUGH; 12681 } 12682 case scAddExpr: 12683 case scMulExpr: 12684 case scUMaxExpr: 12685 case scSMaxExpr: 12686 case scUMinExpr: 12687 case scSMinExpr: { 12688 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12689 bool Proper = true; 12690 for (const SCEV *NAryOp : NAry->operands()) { 12691 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12692 if (D == DoesNotDominateBlock) 12693 return DoesNotDominateBlock; 12694 if (D == DominatesBlock) 12695 Proper = false; 12696 } 12697 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12698 } 12699 case scUDivExpr: { 12700 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12701 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12702 BlockDisposition LD = getBlockDisposition(LHS, BB); 12703 if (LD == DoesNotDominateBlock) 12704 return DoesNotDominateBlock; 12705 BlockDisposition RD = getBlockDisposition(RHS, BB); 12706 if (RD == DoesNotDominateBlock) 12707 return DoesNotDominateBlock; 12708 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12709 ProperlyDominatesBlock : DominatesBlock; 12710 } 12711 case scUnknown: 12712 if (Instruction *I = 12713 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 12714 if (I->getParent() == BB) 12715 return DominatesBlock; 12716 if (DT.properlyDominates(I->getParent(), BB)) 12717 return ProperlyDominatesBlock; 12718 return DoesNotDominateBlock; 12719 } 12720 return ProperlyDominatesBlock; 12721 case scCouldNotCompute: 12722 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12723 } 12724 llvm_unreachable("Unknown SCEV kind!"); 12725 } 12726 12727 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 12728 return getBlockDisposition(S, BB) >= DominatesBlock; 12729 } 12730 12731 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 12732 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 12733 } 12734 12735 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 12736 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 12737 } 12738 12739 void 12740 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 12741 ValuesAtScopes.erase(S); 12742 LoopDispositions.erase(S); 12743 BlockDispositions.erase(S); 12744 UnsignedRanges.erase(S); 12745 SignedRanges.erase(S); 12746 ExprValueMap.erase(S); 12747 HasRecMap.erase(S); 12748 MinTrailingZerosCache.erase(S); 12749 12750 for (auto I = PredicatedSCEVRewrites.begin(); 12751 I != PredicatedSCEVRewrites.end();) { 12752 std::pair<const SCEV *, const Loop *> Entry = I->first; 12753 if (Entry.first == S) 12754 PredicatedSCEVRewrites.erase(I++); 12755 else 12756 ++I; 12757 } 12758 12759 auto RemoveSCEVFromBackedgeMap = 12760 [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 12761 for (auto I = Map.begin(), E = Map.end(); I != E;) { 12762 BackedgeTakenInfo &BEInfo = I->second; 12763 if (BEInfo.hasOperand(S)) 12764 Map.erase(I++); 12765 else 12766 ++I; 12767 } 12768 }; 12769 12770 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 12771 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 12772 } 12773 12774 void 12775 ScalarEvolution::getUsedLoops(const SCEV *S, 12776 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 12777 struct FindUsedLoops { 12778 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 12779 : LoopsUsed(LoopsUsed) {} 12780 SmallPtrSetImpl<const Loop *> &LoopsUsed; 12781 bool follow(const SCEV *S) { 12782 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 12783 LoopsUsed.insert(AR->getLoop()); 12784 return true; 12785 } 12786 12787 bool isDone() const { return false; } 12788 }; 12789 12790 FindUsedLoops F(LoopsUsed); 12791 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 12792 } 12793 12794 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 12795 SmallPtrSet<const Loop *, 8> LoopsUsed; 12796 getUsedLoops(S, LoopsUsed); 12797 for (auto *L : LoopsUsed) 12798 LoopUsers[L].push_back(S); 12799 } 12800 12801 void ScalarEvolution::verify() const { 12802 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12803 ScalarEvolution SE2(F, TLI, AC, DT, LI); 12804 12805 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 12806 12807 // Maps SCEV expressions from one ScalarEvolution "universe" to another.
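// Constants and unknowns are re-created in the destination instance; every // other expression kind is rebuilt structurally by SCEVRewriteVisitor.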
12808 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> { 12809 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {} 12810 12811 const SCEV *visitConstant(const SCEVConstant *Constant) { 12812 return SE.getConstant(Constant->getAPInt()); 12813 } 12814 12815 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12816 return SE.getUnknown(Expr->getValue()); 12817 } 12818 12819 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { 12820 return SE.getCouldNotCompute(); 12821 } 12822 }; 12823 12824 SCEVMapper SCM(SE2); 12825 12826 while (!LoopStack.empty()) { 12827 auto *L = LoopStack.pop_back_val(); 12828 llvm::append_range(LoopStack, *L); 12829 12830 auto *CurBECount = SCM.visit( 12831 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L)); 12832 auto *NewBECount = SE2.getBackedgeTakenCount(L); 12833 12834 if (CurBECount == SE2.getCouldNotCompute() || 12835 NewBECount == SE2.getCouldNotCompute()) { 12836 // NB! This situation is legal, but is very suspicious -- whatever pass 12837 // changed the loop to make a trip count go from "could not compute" to 12838 // computable or vice-versa *should have* invalidated SCEV. However, we 12839 // choose not to assert here (for now) since we don't want false 12840 // positives. 12841 continue; 12842 } 12843 12844 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) { 12845 // SCEV treats "undef" as an unknown but consistent value (i.e. it does 12846 // not propagate undef aggressively). This means we can (and do) fail 12847 // verification in cases where a transform makes the trip count of a loop 12848 // go from "undef" to "undef+1" (say). The transform is fine, since in 12849 // both cases the loop iterates "undef" times, but SCEV incorrectly thinks 12850 // we increased the trip count of the loop by 1. 12851 continue; 12852 } 12853 12854 if (SE.getTypeSizeInBits(CurBECount->getType()) > 12855 SE.getTypeSizeInBits(NewBECount->getType())) 12856 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType()); 12857 else if (SE.getTypeSizeInBits(CurBECount->getType()) < 12858 SE.getTypeSizeInBits(NewBECount->getType())) 12859 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType()); 12860 12861 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount); 12862 12863 // Unless VerifySCEVStrict is set, we only compare constant deltas. 12864 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) { 12865 dbgs() << "Trip Count for " << *L << " Changed!\n"; 12866 dbgs() << "Old: " << *CurBECount << "\n"; 12867 dbgs() << "New: " << *NewBECount << "\n"; 12868 dbgs() << "Delta: " << *Delta << "\n"; 12869 std::abort(); 12870 } 12871 } 12872 12873 // Collect all valid loops currently in LoopInfo. 12874 SmallPtrSet<Loop *, 32> ValidLoops; 12875 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end()); 12876 while (!Worklist.empty()) { 12877 Loop *L = Worklist.pop_back_val(); 12878 if (ValidLoops.contains(L)) 12879 continue; 12880 ValidLoops.insert(L); 12881 Worklist.append(L->begin(), L->end()); 12882 } 12883 // Check for SCEV expressions referencing invalid/deleted loops.
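// Every AddRec cached in ValueExprMap must name a loop that LoopInfo still // knows about; a stale loop pointer means some pass deleted a loop without // invalidating SCEV.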
12884 for (auto &KV : ValueExprMap) { 12885 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12886 if (!AR) 12887 continue; 12888 assert(ValidLoops.contains(AR->getLoop()) && 12889 "AddRec references invalid loop"); 12890 } 12891 } 12892 12893 bool ScalarEvolution::invalidate( 12894 Function &F, const PreservedAnalyses &PA, 12895 FunctionAnalysisManager::Invalidator &Inv) { 12896 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12897 // of its dependencies is invalidated. 12898 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12899 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12900 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12901 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12902 Inv.invalidate<LoopAnalysis>(F, PA); 12903 } 12904 12905 AnalysisKey ScalarEvolutionAnalysis::Key; 12906 12907 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12908 FunctionAnalysisManager &AM) { 12909 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12910 AM.getResult<AssumptionAnalysis>(F), 12911 AM.getResult<DominatorTreeAnalysis>(F), 12912 AM.getResult<LoopAnalysis>(F)); 12913 } 12914 12915 PreservedAnalyses 12916 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12917 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12918 return PreservedAnalyses::all(); 12919 } 12920 12921 PreservedAnalyses 12922 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12923 // For compatibility with opt's -analyze feature under legacy pass manager 12924 // which was not ported to NPM. This keeps tests using 12925 // update_analyze_test_checks.py working. 12926 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12927 << F.getName() << "':\n"; 12928 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12929 return PreservedAnalyses::all(); 12930 } 12931 12932 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12933 "Scalar Evolution Analysis", false, true) 12934 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12935 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12936 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12937 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12938 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12939 "Scalar Evolution Analysis", false, true) 12940 12941 char ScalarEvolutionWrapperPass::ID = 0; 12942 12943 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12944 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12945 } 12946 12947 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12948 SE.reset(new ScalarEvolution( 12949 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12950 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12951 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12952 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12953 return false; 12954 } 12955 12956 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12957 12958 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12959 SE->print(OS); 12960 } 12961 12962 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12963 if (!VerifySCEV) 12964 return; 12965 12966 SE->verify(); 12967 } 12968 12969 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12970 AU.setPreservesAll(); 12971 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12972 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12973 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12974 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12975 } 12976 12977 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12978 const SCEV *RHS) { 12979 FoldingSetNodeID ID; 12980 assert(LHS->getType() == RHS->getType() && 12981 "Type mismatch between LHS and RHS"); 12982 // Unique this node based on the arguments 12983 ID.AddInteger(SCEVPredicate::P_Equal); 12984 ID.AddPointer(LHS); 12985 ID.AddPointer(RHS); 12986 void *IP = nullptr; 12987 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12988 return S; 12989 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12990 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12991 UniquePreds.InsertNode(Eq, IP); 12992 return Eq; 12993 } 12994 12995 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12996 const SCEVAddRecExpr *AR, 12997 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12998 FoldingSetNodeID ID; 12999 // Unique this node based on the arguments 13000 ID.AddInteger(SCEVPredicate::P_Wrap); 13001 ID.AddPointer(AR); 13002 ID.AddInteger(AddedFlags); 13003 void *IP = nullptr; 13004 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 13005 return S; 13006 auto *OF = new (SCEVAllocator) 13007 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 13008 UniquePreds.InsertNode(OF, IP); 13009 return OF; 13010 } 13011 13012 namespace { 13013 13014 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 13015 public: 13016 13017 /// Rewrites \p S in the context of a loop L and the SCEV predication 13018 /// infrastructure. 13019 /// 13020 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 13021 /// equivalences present in \p Pred. 13022 /// 13023 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 13024 /// \p NewPreds such that the result will be an AddRecExpr. 13025 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 13026 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 13027 SCEVUnionPredicate *Pred) { 13028 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 13029 return Rewriter.visit(S); 13030 } 13031 13032 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 13033 if (Pred) { 13034 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 13035 for (auto *Pred : ExprPreds) 13036 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 13037 if (IPred->getLHS() == Expr) 13038 return IPred->getRHS(); 13039 } 13040 return convertToAddRecWithPreds(Expr); 13041 } 13042 13043 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 13044 const SCEV *Operand = visit(Expr->getOperand()); 13045 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 13046 if (AR && AR->getLoop() == L && AR->isAffine()) { 13047 // This couldn't be folded because the operand didn't have the nuw 13048 // flag. Add the nusw flag as an assumption that we could make. 
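// Under that assumption, zext({Start,+,Step}) rewrites to // {zext(Start),+,sext(Step)} in the wider type.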
13049 const SCEV *Step = AR->getStepRecurrence(SE); 13050 Type *Ty = Expr->getType(); 13051 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 13052 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 13053 SE.getSignExtendExpr(Step, Ty), L, 13054 AR->getNoWrapFlags()); 13055 } 13056 return SE.getZeroExtendExpr(Operand, Expr->getType()); 13057 } 13058 13059 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 13060 const SCEV *Operand = visit(Expr->getOperand()); 13061 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 13062 if (AR && AR->getLoop() == L && AR->isAffine()) { 13063 // This couldn't be folded because the operand didn't have the nsw 13064 // flag. Add the nssw flag as an assumption that we could make. 13065 const SCEV *Step = AR->getStepRecurrence(SE); 13066 Type *Ty = Expr->getType(); 13067 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 13068 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 13069 SE.getSignExtendExpr(Step, Ty), L, 13070 AR->getNoWrapFlags()); 13071 } 13072 return SE.getSignExtendExpr(Operand, Expr->getType()); 13073 } 13074 13075 private: 13076 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 13077 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 13078 SCEVUnionPredicate *Pred) 13079 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 13080 13081 bool addOverflowAssumption(const SCEVPredicate *P) { 13082 if (!NewPreds) { 13083 // Check if we've already made this assumption. 13084 return Pred && Pred->implies(P); 13085 } 13086 NewPreds->insert(P); 13087 return true; 13088 } 13089 13090 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 13091 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 13092 auto *A = SE.getWrapPredicate(AR, AddedFlags); 13093 return addOverflowAssumption(A); 13094 } 13095 13096 // If \p Expr represents a PHINode, we try to see if it can be represented 13097 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 13098 // to add this predicate as a runtime overflow check, we return the AddRec. 13099 // If \p Expr does not meet these conditions (is not a PHI node, or we 13100 // couldn't create an AddRec for it, or couldn't add the predicate), we just 13101 // return \p Expr. 13102 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 13103 if (!isa<PHINode>(Expr->getValue())) 13104 return Expr; 13105 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 13106 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 13107 if (!PredicatedRewrite) 13108 return Expr; 13109 for (auto *P : PredicatedRewrite->second){ 13110 // Wrap predicates from outer loops are not supported. 
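// If any of the required wrap predicates refers to an AddRec of a different // loop, give up and keep the original expression.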
13111 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 13112 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 13113 if (L != AR->getLoop()) 13114 return Expr; 13115 } 13116 if (!addOverflowAssumption(P)) 13117 return Expr; 13118 } 13119 return PredicatedRewrite->first; 13120 } 13121 13122 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 13123 SCEVUnionPredicate *Pred; 13124 const Loop *L; 13125 }; 13126 13127 } // end anonymous namespace 13128 13129 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 13130 SCEVUnionPredicate &Preds) { 13131 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 13132 } 13133 13134 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 13135 const SCEV *S, const Loop *L, 13136 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 13137 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 13138 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 13139 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 13140 13141 if (!AddRec) 13142 return nullptr; 13143 13144 // Since the transformation was successful, we can now transfer the SCEV 13145 // predicates. 13146 for (auto *P : TransformPreds) 13147 Preds.insert(P); 13148 13149 return AddRec; 13150 } 13151 13152 /// SCEV predicates 13153 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 13154 SCEVPredicateKind Kind) 13155 : FastID(ID), Kind(Kind) {} 13156 13157 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 13158 const SCEV *LHS, const SCEV *RHS) 13159 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 13160 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 13161 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 13162 } 13163 13164 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 13165 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 13166 13167 if (!Op) 13168 return false; 13169 13170 return Op->LHS == LHS && Op->RHS == RHS; 13171 } 13172 13173 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 13174 13175 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 13176 13177 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 13178 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 13179 } 13180 13181 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 13182 const SCEVAddRecExpr *AR, 13183 IncrementWrapFlags Flags) 13184 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 13185 13186 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 13187 13188 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 13189 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 13190 13191 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 13192 } 13193 13194 bool SCEVWrapPredicate::isAlwaysTrue() const { 13195 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 13196 IncrementWrapFlags IFlags = Flags; 13197 13198 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 13199 IFlags = clearFlags(IFlags, IncrementNSSW); 13200 13201 return IFlags == IncrementAnyWrap; 13202 } 13203 13204 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 13205 OS.indent(Depth) << *getExpr() << " Added Flags: "; 13206 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 13207 OS << "<nusw>"; 13208 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 13209 OS << "<nssw>"; 13210 OS << "\n"; 13211 } 13212 13213 SCEVWrapPredicate::IncrementWrapFlags 13214 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 13215 ScalarEvolution &SE) { 13216 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 13217 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 13218 13219 // We can safely transfer the NSW flag as NSSW. 13220 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 13221 ImpliedFlags = IncrementNSSW; 13222 13223 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 13224 // If the increment is positive, the SCEV NUW flag will also imply the 13225 // WrapPredicate NUSW flag. 13226 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 13227 if (Step->getValue()->getValue().isNonNegative()) 13228 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 13229 } 13230 13231 return ImpliedFlags; 13232 } 13233 13234 /// Union predicates don't get cached, so create a dummy set ID for them. 13235 SCEVUnionPredicate::SCEVUnionPredicate() 13236 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 13237 13238 bool SCEVUnionPredicate::isAlwaysTrue() const { 13239 return all_of(Preds, 13240 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 13241 } 13242 13243 ArrayRef<const SCEVPredicate *> 13244 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 13245 auto I = SCEVToPreds.find(Expr); 13246 if (I == SCEVToPreds.end()) 13247 return ArrayRef<const SCEVPredicate *>(); 13248 return I->second; 13249 } 13250 13251 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 13252 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 13253 return all_of(Set->Preds, 13254 [this](const SCEVPredicate *I) { return this->implies(I); }); 13255 13256 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 13257 if (ScevPredsIt == SCEVToPreds.end()) 13258 return false; 13259 auto &SCEVPreds = ScevPredsIt->second; 13260 13261 return any_of(SCEVPreds, 13262 [N](const SCEVPredicate *I) { return I->implies(N); }); 13263 } 13264 13265 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 13266 13267 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 13268 for (auto Pred : Preds) 13269 Pred->print(OS, Depth); 13270 } 13271 13272 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 13273 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 13274 for (auto Pred : Set->Preds) 13275 add(Pred); 13276 return; 13277 } 13278 13279 if (implies(N)) 13280 return; 13281 13282 const SCEV *Key = N->getExpr(); 13283 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13284 "associated expression!"); 13285 13286 SCEVToPreds[Key].push_back(N); 13287 Preds.push_back(N); 13288 } 13289 13290 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13291 Loop &L) 13292 : SE(SE), L(L) {} 13293 13294 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13295 const SCEV *Expr = SE.getSCEV(V); 13296 RewriteEntry &Entry = RewriteMap[Expr]; 13297 13298 // If we already have an entry and the version matches, return it. 13299 if (Entry.second && Generation == Entry.first) 13300 return Entry.second; 13301 13302 // We found an entry but it's stale. Rewrite the stale entry 13303 // according to the current predicate.
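// Rewriting the previously rewritten form, rather than the original // expression, only has to apply the predicates added since the entry's // generation.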
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

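// For example, if getSCEV(V) is an AddRec that already carries <nsw>,
// requesting IncrementNSSW below is effectively a no-op: the statically
// implied flags are cleared first, so the wrap predicate that gets emitted
// only covers what is not already guaranteed.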
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2 and B is
// 4; then A / B becomes X / 8).
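// For example, with i32 values %a and %b (illustrative SCEVs, matched by the
// code below):
//   (zext i3 (trunc i32 %a to i3) to i32) is %a urem 8, so LHS = %a, RHS = 8.
//   ((-1 * (%a /u %b) * %b) + %a) folds to %a urem %b, so LHS = %a and
//   RHS = %b.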
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock*, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV*, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExpr because
/// we cannot guarantee that the replacement is loop invariant in the loop of
/// the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

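// For example, if loop entry is guarded by 'icmp ult %n, 16', the ICMP_ULT
// case below maps %n to (15 umin %n), and SCEVLoopGuardRewriter then applies
// that mapping to every SCEVUnknown use of %n in Expr.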
const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // If we have LHS == 0, check whether LHS is computing a property of some
    // unknown SCEV %v that we can express explicitly by rewriting %v.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          Value *V = LHSUnknown->getValue();
          auto Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
          RewriteMap[V] = Multiple;
          return;
        }
      }
    }

    if (!isa<SCEVUnknown>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // For now, limit to conditions that provide information about unknown
    // expressions.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown)
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHSUnknown->getValue());
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;

    // TODO: use information from more predicates.
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      if (!containsAddRecurrence(RHS))
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(
            RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      if (!containsAddRecurrence(RHS))
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      if (!containsAddRecurrence(RHS))
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      if (!containsAddRecurrence(RHS))
        RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewriteMap[LHSUnknown->getValue()] = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
  };
  // Starting at the loop predecessor, climb up the predecessor chain as long
  // as we can find predecessors that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(LoopEntryPredicate->getCondition());
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                         getSCEV(Cmp->getOperand(1)), RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}