//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
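// As a brief illustration of the representation described above (an informal
// sketch, not normative documentation): for a loop such as
//
//   for (int i = 0; i != n; ++i)
//     A[4*i + 7] = ...;
//
// the induction variable %i is modeled as the add recurrence {0,+,1}<%loop>,
// and the index expression 4*i + 7 folds to the canonical form
// {7,+,4}<%loop>. Values the analysis cannot break down further (here, the
// unanalyzable loop bound %n) are wrapped in SCEVUnknown nodes.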
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
    ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden,
                        cl::init(true),
                        cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
// SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
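// For reference, a few textual forms the printer above produces (an
// illustrative sample, not an exhaustive list):
//
//   %x                          - a SCEVUnknown, printed as its Value
//   (trunc i64 %x to i32)       - a truncate cast expression
//   (%a + %b)<nsw>              - an add carrying a no-signed-wrap flag
//   (%a /u 8)                   - an unsigned division
//   {%start,+,4}<nuw><%loop>    - an add recurrence over loop %loop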
Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
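// The three recognizers above match the classic null-GEP idioms that front
// ends use to spell sizeof/alignof/offsetof as constant expressions. As an
// informal example of the pattern isSizeOf accepts, sizeof(double) appears
// in IR as a ptrtoint of a GEP one element past a null pointer:
//
//   ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)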
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCacheValue is a set of pairs of values
/// that have been previously deemed to be "equally complex" by this routine.
/// It is intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}
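// To make the ordering above concrete (a descriptive sketch of the rules,
// not additional logic): given an integer argument %x and a pointer argument
// %p, %p sorts after %x because pointers order after integers; given two
// arguments %a (position 0) and %b (position 1), %a sorts first because
// arguments of the same kind compare by position.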
// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the max analysis depth was reached, return None, assuming we do not know
// if they are equivalent for sure.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
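// As a descriptive example of why the grouping matters (illustrative only):
// given the operand list (%x, 2, %x) for an add, sorting by complexity places
// the constant first and leaves the duplicate %x operands adjacent, giving
// (2, %x, %x), so the folder in getAddExpr can combine them into the
// canonical form (2 + (2 * %x)) with a single linear scan.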
/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
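// A worked instance of the scheme above (a sketch for intuition, using small
// numbers): take K = 4 and W = 8. Then K! = 24 = 2^3 * 3, so T = 3 and
// K! / 2^T = 3. The product It*(It-1)*(It-2)*(It-3) is formed at width
// W + T = 11 bits, the udiv by 2^T = 8 shifts it right so the low 8 bits are
// exact, and the final multiply by 171 (the inverse of 3 modulo 2^8, since
// 3 * 171 = 513 = 2*256 + 1) performs the exact division by the odd part 3.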
/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
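// A worked example of the formula above (illustrative): the chain
// {0,+,1,+,1} takes the values 0, 1, 3, 6, 10, ... Evaluating it at
// iteration n gives 0*BC(n,0) + 1*BC(n,1) + 1*BC(n,2) = n + n*(n-1)/2,
// i.e. the triangular number n*(n+1)/2, which matches the sequence.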
//===----------------------------------------------------------------------===//
// SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  assert(!getDataLayout().isNonIntegralPointerType(Op->getType()) &&
         "Source pointer type must be integral for ptrtoint!");

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      Type *ExprPtrTy = Expr->getType();
      assert(ExprPtrTy->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during recursion and other modifications an entry for ID
    // was inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
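// A small worked example of the two helpers above (illustrative, using an
// 8-bit recurrence with Step = 4): the signed limit is INT8_MIN - 4 = -132,
// which wraps to 124, and indeed any value s< 124 can be incremented by 4
// without signed overflow (123 + 4 = 127 at most). The unsigned limit is
// 0 - 4, which wraps to 252, and any value u< 252 can be incremented by 4
// without unsigned wrap (251 + 4 = 255 at most).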
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
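// To illustrate the normalization performed above (an informal sketch):
// suppose AR is {(4 + %n),+,4} over i8 and a sign extension to i32 is
// requested. getPreStartForExtend strips one copy of the step from the start,
// yielding PreStart = %n, and then tries to prove that %n + 4 does not
// sign-overflow. On success the extended recurrence can keep the start in
// post-increment form, {(4 + (sext i8 %n to i32)),+,4}, so pre- and
// post-increment forms of the recurrence stay congruent.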
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply
// "({S-T,+,X}+T) does not overflow" restricted to the 0th iteration.
// Therefore we only need to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs,
// and the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the
// top level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned)
// and the number of trailing zeros of (C - D + x * n) is maximized, where C
// is the \p ConstantStart, x is an arbitrary \p Step, and n is the loop trip
// count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
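// A worked instance of the extraction above (illustrative): suppose
// C = 117 = 0b1110101 and every remaining operand x, y, ... is known to have
// at least TZ = 4 trailing zero bits. Taking D as the low 4 bits of C gives
// D = 0b0101 = 5, so C - D = 112 = 0b1110000 and (C - D + x + y + ...) is a
// multiple of 16. Adding D back only fills the four known-zero low bits, so
// the top-level addition cannot carry and therefore cannot wrap, signed or
// unsigned.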
C.trunc(TZ).zext(BitWidth) : C; 1547 } 1548 return APInt(BitWidth, 0); 1549 } 1550 1551 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1552 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1553 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1554 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1555 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1556 const APInt &ConstantStart, 1557 const SCEV *Step) { 1558 const unsigned BitWidth = ConstantStart.getBitWidth(); 1559 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1560 if (TZ) 1561 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1562 : ConstantStart; 1563 return APInt(BitWidth, 0); 1564 } 1565 1566 const SCEV * 1567 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1568 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1569 "This is not an extending conversion!"); 1570 assert(isSCEVable(Ty) && 1571 "This is not a conversion to a SCEVable type!"); 1572 Ty = getEffectiveSCEVType(Ty); 1573 1574 // Fold if the operand is constant. 1575 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1576 return getConstant( 1577 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1578 1579 // zext(zext(x)) --> zext(x) 1580 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1581 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1582 1583 // Before doing any expensive analysis, check to see if we've already 1584 // computed a SCEV for this Op and Ty. 1585 FoldingSetNodeID ID; 1586 ID.AddInteger(scZeroExtend); 1587 ID.AddPointer(Op); 1588 ID.AddPointer(Ty); 1589 void *IP = nullptr; 1590 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1591 if (Depth > MaxCastDepth) { 1592 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1593 Op, Ty); 1594 UniqueSCEVs.InsertNode(S, IP); 1595 addToLoopUseLists(S); 1596 return S; 1597 } 1598 1599 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1600 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1601 // It's possible the bits taken off by the truncate were all zero bits. If 1602 // so, we should be able to simplify this further. 1603 const SCEV *X = ST->getOperand(); 1604 ConstantRange CR = getUnsignedRange(X); 1605 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1606 unsigned NewBits = getTypeSizeInBits(Ty); 1607 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1608 CR.zextOrTrunc(NewBits))) 1609 return getTruncateOrZeroExtend(X, Ty, Depth); 1610 } 1611 1612 // If the input value is a chrec scev, and we can prove that the value 1613 // did not overflow the old, smaller, value, we can zero extend all of the 1614 // operands (often constants). 
This allows analysis of something like 1615 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1616 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1617 if (AR->isAffine()) { 1618 const SCEV *Start = AR->getStart(); 1619 const SCEV *Step = AR->getStepRecurrence(*this); 1620 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1621 const Loop *L = AR->getLoop(); 1622 1623 if (!AR->hasNoUnsignedWrap()) { 1624 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1625 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1626 } 1627 1628 // If we have special knowledge that this addrec won't overflow, 1629 // we don't need to do any further analysis. 1630 if (AR->hasNoUnsignedWrap()) 1631 return getAddRecExpr( 1632 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1633 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1634 1635 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1636 // Note that this serves two purposes: It filters out loops that are 1637 // simply not analyzable, and it covers the case where this code is 1638 // being called from within backedge-taken count analysis, such that 1639 // attempting to ask for the backedge-taken count would likely result 1640 // in infinite recursion. In the latter case, the analysis code will 1641 // cope with a conservative value, and it will take care to purge 1642 // that value once it has finished. 1643 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1644 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1645 // Manually compute the final value for AR, checking for overflow. 1646 1647 // Check whether the backedge-taken count can be losslessly cast to 1648 // the addrec's type. The count is always unsigned. 1649 const SCEV *CastedMaxBECount = 1650 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1651 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1652 CastedMaxBECount, MaxBECount->getType(), Depth); 1653 if (MaxBECount == RecastedMaxBECount) { 1654 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1655 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1656 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1657 SCEV::FlagAnyWrap, Depth + 1); 1658 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1659 SCEV::FlagAnyWrap, 1660 Depth + 1), 1661 WideTy, Depth + 1); 1662 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1663 const SCEV *WideMaxBECount = 1664 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1665 const SCEV *OperandExtendedAdd = 1666 getAddExpr(WideStart, 1667 getMulExpr(WideMaxBECount, 1668 getZeroExtendExpr(Step, WideTy, Depth + 1), 1669 SCEV::FlagAnyWrap, Depth + 1), 1670 SCEV::FlagAnyWrap, Depth + 1); 1671 if (ZAdd == OperandExtendedAdd) { 1672 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1673 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); 1674 // Return the expression with the addrec on the outside. 1675 return getAddRecExpr( 1676 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1677 Depth + 1), 1678 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1679 AR->getNoWrapFlags()); 1680 } 1681 // Similar to above, only this time treat the step value as signed. 1682 // This covers loops that count down.
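// Illustrative sketch (not a specific regression test): for a count-down
// loop such as `for (unsigned i = 5; i != 0; --i)` the IV is {5,+,-1};
// re-doing the wide-type computation below with a sign-extended step is
// what lets SCEV recognize that the recurrence never self-wraps even
// though -1 is not a valid unsigned increment.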
1683 OperandExtendedAdd = 1684 getAddExpr(WideStart, 1685 getMulExpr(WideMaxBECount, 1686 getSignExtendExpr(Step, WideTy, Depth + 1), 1687 SCEV::FlagAnyWrap, Depth + 1), 1688 SCEV::FlagAnyWrap, Depth + 1); 1689 if (ZAdd == OperandExtendedAdd) { 1690 // Cache knowledge of AR NW, which is propagated to this AddRec. 1691 // Negative step causes unsigned wrap, but it still can't self-wrap. 1692 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 1693 // Return the expression with the addrec on the outside. 1694 return getAddRecExpr( 1695 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1696 Depth + 1), 1697 getSignExtendExpr(Step, Ty, Depth + 1), L, 1698 AR->getNoWrapFlags()); 1699 } 1700 } 1701 } 1702 1703 // Normally, in the cases we can prove no-overflow via a 1704 // backedge guarding condition, we can also compute a backedge 1705 // taken count for the loop. The exceptions are assumptions and 1706 // guards present in the loop -- SCEV is not great at exploiting 1707 // these to compute max backedge taken counts, but can still use 1708 // these to prove lack of overflow. Use this fact to avoid 1709 // doing extra work that may not pay off. 1710 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1711 !AC.assumptions().empty()) { 1712 1713 auto NewFlags = proveNoUnsignedWrapViaInduction(AR); 1714 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1715 if (AR->hasNoUnsignedWrap()) { 1716 // Same as nuw case above - duplicated here to avoid a compile time 1717 // issue. It's not clear that the order of checks matters, but 1718 // it's one of two possible causes of a change which was 1719 // reverted. Be conservative for the moment. 1720 return getAddRecExpr( 1721 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1722 Depth + 1), 1723 getZeroExtendExpr(Step, Ty, Depth + 1), L, 1724 AR->getNoWrapFlags()); 1725 } 1726 1727 // For a negative step, we can extend the operands iff doing so only 1728 // traverses values in the range zext([0,UINT_MAX]). 1729 if (isKnownNegative(Step)) { 1730 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1731 getSignedRangeMin(Step)); 1732 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1733 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1734 // Cache knowledge of AR NW, which is propagated to this 1735 // AddRec. Negative step causes unsigned wrap, but it 1736 // still can't self-wrap. 1737 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 1738 // Return the expression with the addrec on the outside.
1739 return getAddRecExpr( 1740 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1741 Depth + 1), 1742 getSignExtendExpr(Step, Ty, Depth + 1), L, 1743 AR->getNoWrapFlags()); 1744 } 1745 } 1746 } 1747 1748 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1749 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1750 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1751 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1752 const APInt &C = SC->getAPInt(); 1753 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1754 if (D != 0) { 1755 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1756 const SCEV *SResidual = 1757 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1758 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1759 return getAddExpr(SZExtD, SZExtR, 1760 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1761 Depth + 1); 1762 } 1763 } 1764 1765 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1766 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); 1767 return getAddRecExpr( 1768 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1), 1769 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 1770 } 1771 } 1772 1773 // zext(A % B) --> zext(A) % zext(B) 1774 { 1775 const SCEV *LHS; 1776 const SCEV *RHS; 1777 if (matchURem(Op, LHS, RHS)) 1778 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1779 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1780 } 1781 1782 // zext(A / B) --> zext(A) / zext(B). 1783 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1784 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1785 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1786 1787 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1788 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1789 if (SA->hasNoUnsignedWrap()) { 1790 // If the addition does not unsign overflow then we can, by definition, 1791 // commute the zero extension with the addition operation. 1792 SmallVector<const SCEV *, 4> Ops; 1793 for (const auto *Op : SA->operands()) 1794 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1795 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1796 } 1797 1798 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1799 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1800 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1801 // 1802 // Often address arithmetics contain expressions like 1803 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1804 // This transformation is useful while proving that such expressions are 1805 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 
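// Worked example (illustrative, assuming nothing more is known about X):
// for zext(5 + 4*X), the non-constant part 4*X has two known trailing
// zero bits, so D is the low two bits of 5, i.e. D == 1, and the fold
// yields zext(1) + zext(4 + 4*X); adding 1 to a value whose low two bits
// are clear cannot unsigned-wrap.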
1806 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1807 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1808 if (D != 0) { 1809 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1810 const SCEV *SResidual = 1811 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1812 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1813 return getAddExpr(SZExtD, SZExtR, 1814 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1815 Depth + 1); 1816 } 1817 } 1818 } 1819 1820 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1821 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1822 if (SM->hasNoUnsignedWrap()) { 1823 // If the multiply does not unsign overflow then we can, by definition, 1824 // commute the zero extension with the multiply operation. 1825 SmallVector<const SCEV *, 4> Ops; 1826 for (const auto *Op : SM->operands()) 1827 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1828 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1829 } 1830 1831 // zext(2^K * (trunc X to iN)) to iM -> 1832 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1833 // 1834 // Proof: 1835 // 1836 // zext(2^K * (trunc X to iN)) to iM 1837 // = zext((trunc X to iN) << K) to iM 1838 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1839 // (because shl removes the top K bits) 1840 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1841 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1842 // 1843 if (SM->getNumOperands() == 2) 1844 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1845 if (MulLHS->getAPInt().isPowerOf2()) 1846 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1847 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1848 MulLHS->getAPInt().logBase2(); 1849 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1850 return getMulExpr( 1851 getZeroExtendExpr(MulLHS, Ty), 1852 getZeroExtendExpr( 1853 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1854 SCEV::FlagNUW, Depth + 1); 1855 } 1856 } 1857 1858 // The cast wasn't folded; create an explicit cast node. 1859 // Recompute the insert position, as it may have been invalidated. 1860 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1861 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1862 Op, Ty); 1863 UniqueSCEVs.InsertNode(S, IP); 1864 addToLoopUseLists(S); 1865 return S; 1866 } 1867 1868 const SCEV * 1869 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1870 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1871 "This is not an extending conversion!"); 1872 assert(isSCEVable(Ty) && 1873 "This is not a conversion to a SCEVable type!"); 1874 Ty = getEffectiveSCEVType(Ty); 1875 1876 // Fold if the operand is constant. 1877 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1878 return getConstant( 1879 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1880 1881 // sext(sext(x)) --> sext(x) 1882 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1883 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1884 1885 // sext(zext(x)) --> zext(x) 1886 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1887 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1888 1889 // Before doing any expensive analysis, check to see if we've already 1890 // computed a SCEV for this Op and Ty. 
1891 FoldingSetNodeID ID; 1892 ID.AddInteger(scSignExtend); 1893 ID.AddPointer(Op); 1894 ID.AddPointer(Ty); 1895 void *IP = nullptr; 1896 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1897 // Limit recursion depth. 1898 if (Depth > MaxCastDepth) { 1899 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1900 Op, Ty); 1901 UniqueSCEVs.InsertNode(S, IP); 1902 addToLoopUseLists(S); 1903 return S; 1904 } 1905 1906 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1907 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1908 // It's possible the bits taken off by the truncate were all sign bits. If 1909 // so, we should be able to simplify this further. 1910 const SCEV *X = ST->getOperand(); 1911 ConstantRange CR = getSignedRange(X); 1912 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1913 unsigned NewBits = getTypeSizeInBits(Ty); 1914 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1915 CR.sextOrTrunc(NewBits))) 1916 return getTruncateOrSignExtend(X, Ty, Depth); 1917 } 1918 1919 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1920 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1921 if (SA->hasNoSignedWrap()) { 1922 // If the addition does not sign overflow then we can, by definition, 1923 // commute the sign extension with the addition operation. 1924 SmallVector<const SCEV *, 4> Ops; 1925 for (const auto *Op : SA->operands()) 1926 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1927 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1928 } 1929 1930 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1931 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1932 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1933 // 1934 // For instance, this will bring two seemingly different expressions: 1935 // 1 + sext(5 + 20 * %x + 24 * %y) and 1936 // sext(6 + 20 * %x + 24 * %y) 1937 // to the same form: 1938 // 2 + sext(4 + 20 * %x + 24 * %y) 1939 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1940 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1941 if (D != 0) { 1942 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1943 const SCEV *SResidual = 1944 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1945 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1946 return getAddExpr(SSExtD, SSExtR, 1947 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1948 Depth + 1); 1949 } 1950 } 1951 } 1952 // If the input value is a chrec scev, and we can prove that the value 1953 // did not overflow the old, smaller, value, we can sign extend all of the 1954 // operands (often constants). This allows analysis of something like 1955 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1956 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1957 if (AR->isAffine()) { 1958 const SCEV *Start = AR->getStart(); 1959 const SCEV *Step = AR->getStepRecurrence(*this); 1960 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1961 const Loop *L = AR->getLoop(); 1962 1963 if (!AR->hasNoSignedWrap()) { 1964 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1965 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1966 } 1967 1968 // If we have special knowledge that this addrec won't overflow, 1969 // we don't need to do any further analysis. 
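// (Illustrative: for the i8 IV {0,+,1}<nsw> of a loop like the one in the
// comment above, this shortcut directly produces the i32 recurrence
// {0,+,1}<nsw> without ever consulting the backedge-taken count.)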
1970 if (AR->hasNoSignedWrap()) 1971 return getAddRecExpr( 1972 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 1973 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW); 1974 1975 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1976 // Note that this serves two purposes: It filters out loops that are 1977 // simply not analyzable, and it covers the case where this code is 1978 // being called from within backedge-taken count analysis, such that 1979 // attempting to ask for the backedge-taken count would likely result 1980 // in infinite recursion. In the latter case, the analysis code will 1981 // cope with a conservative value, and it will take care to purge 1982 // that value once it has finished. 1983 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1984 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1985 // Manually compute the final value for AR, checking for 1986 // overflow. 1987 1988 // Check whether the backedge-taken count can be losslessly cast to 1989 // the addrec's type. The count is always unsigned. 1990 const SCEV *CastedMaxBECount = 1991 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1992 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1993 CastedMaxBECount, MaxBECount->getType(), Depth); 1994 if (MaxBECount == RecastedMaxBECount) { 1995 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1996 // Check whether Start+Step*MaxBECount has no signed overflow. 1997 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 1998 SCEV::FlagAnyWrap, Depth + 1); 1999 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 2000 SCEV::FlagAnyWrap, 2001 Depth + 1), 2002 WideTy, Depth + 1); 2003 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 2004 const SCEV *WideMaxBECount = 2005 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 2006 const SCEV *OperandExtendedAdd = 2007 getAddExpr(WideStart, 2008 getMulExpr(WideMaxBECount, 2009 getSignExtendExpr(Step, WideTy, Depth + 1), 2010 SCEV::FlagAnyWrap, Depth + 1), 2011 SCEV::FlagAnyWrap, Depth + 1); 2012 if (SAdd == OperandExtendedAdd) { 2013 // Cache knowledge of AR NSW, which is propagated to this AddRec. 2014 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); 2015 // Return the expression with the addrec on the outside. 2016 return getAddRecExpr( 2017 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2018 Depth + 1), 2019 getSignExtendExpr(Step, Ty, Depth + 1), L, 2020 AR->getNoWrapFlags()); 2021 } 2022 // Similar to above, only this time treat the step value as unsigned. 2023 // This covers loops that count up with an unsigned step. 2024 OperandExtendedAdd = 2025 getAddExpr(WideStart, 2026 getMulExpr(WideMaxBECount, 2027 getZeroExtendExpr(Step, WideTy, Depth + 1), 2028 SCEV::FlagAnyWrap, Depth + 1), 2029 SCEV::FlagAnyWrap, Depth + 1); 2030 if (SAdd == OperandExtendedAdd) { 2031 // If AR wraps around then 2032 // 2033 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 2034 // => SAdd != OperandExtendedAdd 2035 // 2036 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 2037 // (SAdd == OperandExtendedAdd => AR is NW) 2038 2039 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 2040 2041 // Return the expression with the addrec on the outside.
2042 return getAddRecExpr( 2043 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2044 Depth + 1), 2045 getZeroExtendExpr(Step, Ty, Depth + 1), L, 2046 AR->getNoWrapFlags()); 2047 } 2048 } 2049 } 2050 2051 auto NewFlags = proveNoSignedWrapViaInduction(AR); 2052 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 2053 if (AR->hasNoSignedWrap()) { 2054 // Same as nsw case above - duplicated here to avoid a compile time 2055 // issue. It's not clear that the order of checks matters, but 2056 // it's one of two possible causes of a change which was 2057 // reverted. Be conservative for the moment. 2058 return getAddRecExpr( 2059 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2060 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2061 } 2062 2063 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 2064 // if D + (C - D + Step * n) could be proven to not signed wrap 2065 // where D maximizes the number of trailing zeros of (C - D + Step * n) 2066 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 2067 const APInt &C = SC->getAPInt(); 2068 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 2069 if (D != 0) { 2070 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2071 const SCEV *SResidual = 2072 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 2073 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2074 return getAddExpr(SSExtD, SSExtR, 2075 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2076 Depth + 1); 2077 } 2078 } 2079 2080 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2081 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); 2082 return getAddRecExpr( 2083 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1), 2084 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags()); 2085 } 2086 } 2087 2088 // If the input value is provably positive and we could not simplify 2089 // away the sext, build a zext instead. 2090 if (isKnownNonNegative(Op)) 2091 return getZeroExtendExpr(Op, Ty, Depth + 1); 2092 2093 // The cast wasn't folded; create an explicit cast node. 2094 // Recompute the insert position, as it may have been invalidated. 2095 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2096 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2097 Op, Ty); 2098 UniqueSCEVs.InsertNode(S, IP); 2099 addToLoopUseLists(S); 2100 return S; 2101 } 2102 2103 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2104 /// unspecified bits out to the given type. 2105 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2106 Type *Ty) { 2107 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2108 "This is not an extending conversion!"); 2109 assert(isSCEVable(Ty) && 2110 "This is not a conversion to a SCEVable type!"); 2111 Ty = getEffectiveSCEVType(Ty); 2112 2113 // Sign-extend negative constants. 2114 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2115 if (SC->getAPInt().isNegative()) 2116 return getSignExtendExpr(Op, Ty); 2117 2118 // Peel off a truncate cast. 2119 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2120 const SCEV *NewOp = T->getOperand(); 2121 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2122 return getAnyExtendExpr(NewOp, Ty); 2123 return getTruncateOrNoop(NewOp, Ty); 2124 } 2125 2126 // Next try a zext cast. If the cast is folded, use it.
2127 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2128 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2129 return ZExt; 2130 2131 // Next try a sext cast. If the cast is folded, use it. 2132 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2133 if (!isa<SCEVSignExtendExpr>(SExt)) 2134 return SExt; 2135 2136 // Force the cast to be folded into the operands of an addrec. 2137 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2138 SmallVector<const SCEV *, 4> Ops; 2139 for (const SCEV *Op : AR->operands()) 2140 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2141 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2142 } 2143 2144 // If the expression is obviously signed, use the sext cast value. 2145 if (isa<SCEVSMaxExpr>(Op)) 2146 return SExt; 2147 2148 // Absent any other information, use the zext cast value. 2149 return ZExt; 2150 } 2151 2152 /// Process the given Ops list, which is a list of operands to be added under 2153 /// the given scale, update the given map. This is a helper function for 2154 /// getAddRecExpr. As an example of what it does, given a sequence of operands 2155 /// that would form an add expression like this: 2156 /// 2157 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2158 /// 2159 /// where A and B are constants, update the map with these values: 2160 /// 2161 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2162 /// 2163 /// and add 13 + A*B*29 to AccumulatedConstant. 2164 /// This will allow getAddRecExpr to produce this: 2165 /// 2166 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2167 /// 2168 /// This form often exposes folding opportunities that are hidden in 2169 /// the original operand list. 2170 /// 2171 /// Return true iff it appears that any interesting folding opportunities 2172 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 2173 /// the common case where no interesting opportunities are present, and 2174 /// is also used as a check to avoid infinite recursion. 2175 static bool 2176 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2177 SmallVectorImpl<const SCEV *> &NewOps, 2178 APInt &AccumulatedConstant, 2179 const SCEV *const *Ops, size_t NumOperands, 2180 const APInt &Scale, 2181 ScalarEvolution &SE) { 2182 bool Interesting = false; 2183 2184 // Iterate over the add operands. They are sorted, with constants first. 2185 unsigned i = 0; 2186 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2187 ++i; 2188 // Pull a buried constant out to the outside. 2189 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2190 Interesting = true; 2191 AccumulatedConstant += Scale * C->getAPInt(); 2192 } 2193 2194 // Next comes everything else. We're especially interested in multiplies 2195 // here, but they're in the middle, so just visit the rest with one loop. 2196 for (; i != NumOperands; ++i) { 2197 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2198 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2199 APInt NewScale = 2200 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2201 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2202 // A multiplication of a constant with another add; recurse. 2203 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2204 Interesting |= 2205 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2206 Add->op_begin(), Add->getNumOperands(), 2207 NewScale, SE); 2208 } else { 2209 // A multiplication of a constant with some other value. 
Update 2210 // the map. 2211 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2212 const SCEV *Key = SE.getMulExpr(MulOps); 2213 auto Pair = M.insert({Key, NewScale}); 2214 if (Pair.second) { 2215 NewOps.push_back(Pair.first->first); 2216 } else { 2217 Pair.first->second += NewScale; 2218 // The map already had an entry for this value, which may indicate 2219 // a folding opportunity. 2220 Interesting = true; 2221 } 2222 } 2223 } else { 2224 // An ordinary operand. Update the map. 2225 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2226 M.insert({Ops[i], Scale}); 2227 if (Pair.second) { 2228 NewOps.push_back(Pair.first->first); 2229 } else { 2230 Pair.first->second += Scale; 2231 // The map already had an entry for this value, which may indicate 2232 // a folding opportunity. 2233 Interesting = true; 2234 } 2235 } 2236 } 2237 2238 return Interesting; 2239 } 2240 2241 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2242 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2243 // can't-overflow flags for the operation if possible. 2244 static SCEV::NoWrapFlags 2245 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2246 const ArrayRef<const SCEV *> Ops, 2247 SCEV::NoWrapFlags Flags) { 2248 using namespace std::placeholders; 2249 2250 using OBO = OverflowingBinaryOperator; 2251 2252 bool CanAnalyze = 2253 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2254 (void)CanAnalyze; 2255 assert(CanAnalyze && "don't call from other places!"); 2256 2257 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2258 SCEV::NoWrapFlags SignOrUnsignWrap = 2259 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2260 2261 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 2262 auto IsKnownNonNegative = [&](const SCEV *S) { 2263 return SE->isKnownNonNegative(S); 2264 }; 2265 2266 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2267 Flags = 2268 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2269 2270 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2271 2272 if (SignOrUnsignWrap != SignOrUnsignMask && 2273 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2274 isa<SCEVConstant>(Ops[0])) { 2275 2276 auto Opcode = [&] { 2277 switch (Type) { 2278 case scAddExpr: 2279 return Instruction::Add; 2280 case scMulExpr: 2281 return Instruction::Mul; 2282 default: 2283 llvm_unreachable("Unexpected SCEV op."); 2284 } 2285 }(); 2286 2287 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2288 2289 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2290 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2291 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2292 Opcode, C, OBO::NoSignedWrap); 2293 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2295 } 2296 2297 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 
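// (Illustrative sketch: for A + 1 where getUnsignedRange(A) is known to
// be [0, 100], the guaranteed no-unsigned-wrap region for `add 1`
// contains that range, so FlagNUW can be set here.)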
2298 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2299 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2300 Opcode, C, OBO::NoUnsignedWrap); 2301 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2302 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2303 } 2304 } 2305 2306 return Flags; 2307 } 2308 2309 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2310 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2311 } 2312 2313 /// Get a canonical add expression, or something simpler if possible. 2314 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2315 SCEV::NoWrapFlags OrigFlags, 2316 unsigned Depth) { 2317 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2318 "only nuw or nsw allowed"); 2319 assert(!Ops.empty() && "Cannot get empty add!"); 2320 if (Ops.size() == 1) return Ops[0]; 2321 #ifndef NDEBUG 2322 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2323 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2324 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2325 "SCEVAddExpr operand types don't match!"); 2326 #endif 2327 2328 // Sort by complexity, this groups all similar expression types together. 2329 GroupByComplexity(Ops, &LI, DT); 2330 2331 // If there are any constants, fold them together. 2332 unsigned Idx = 0; 2333 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2334 ++Idx; 2335 assert(Idx < Ops.size()); 2336 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2337 // We found two constants, fold them together! 2338 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2339 if (Ops.size() == 2) return Ops[0]; 2340 Ops.erase(Ops.begin()+1); // Erase the folded element 2341 LHSC = cast<SCEVConstant>(Ops[0]); 2342 } 2343 2344 // If we are left with a constant zero being added, strip it off. 2345 if (LHSC->getValue()->isZero()) { 2346 Ops.erase(Ops.begin()); 2347 --Idx; 2348 } 2349 2350 if (Ops.size() == 1) return Ops[0]; 2351 } 2352 2353 // Delay expensive flag strengthening until necessary. 2354 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2355 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); 2356 }; 2357 2358 // Limit recursion calls depth. 2359 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2360 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2361 2362 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) { 2363 // Don't strengthen flags if we have no new information. 2364 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); 2365 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) 2366 Add->setNoWrapFlags(ComputeFlags(Ops)); 2367 return S; 2368 } 2369 2370 // Okay, check to see if the same value occurs in the operand list more than 2371 // once. If so, merge them together into a multiply expression. Since we 2372 // sorted the list, these values are required to be adjacent. 2373 Type *Ty = Ops[0]->getType(); 2374 bool FoundMatch = false; 2375 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2376 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2377 // Scan ahead to count how many equal operands there are. 2378 unsigned Count = 2; 2379 while (i+Count != e && Ops[i+Count] == Ops[i]) 2380 ++Count; 2381 // Merge the values into a multiply.
2382 const SCEV *Scale = getConstant(Ty, Count); 2383 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2384 if (Ops.size() == Count) 2385 return Mul; 2386 Ops[i] = Mul; 2387 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2388 --i; e -= Count - 1; 2389 FoundMatch = true; 2390 } 2391 if (FoundMatch) 2392 return getAddExpr(Ops, OrigFlags, Depth + 1); 2393 2394 // Check for truncates. If all the operands are truncated from the same 2395 // type, see if factoring out the truncate would permit the result to be 2396 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y) 2397 // if the contents of the resulting outer trunc fold to something simple. 2398 auto FindTruncSrcType = [&]() -> Type * { 2399 // We're ultimately looking to fold an addrec of truncs and muls of only 2400 // constants and truncs, so if we find any other types of SCEV 2401 // as operands of the addrec then we bail and return nullptr here. 2402 // Otherwise, we return the type of the operand of a trunc that we find. 2403 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2404 return T->getOperand()->getType(); 2405 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2406 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2407 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2408 return T->getOperand()->getType(); 2409 } 2410 return nullptr; 2411 }; 2412 if (auto *SrcType = FindTruncSrcType()) { 2413 SmallVector<const SCEV *, 8> LargeOps; 2414 bool Ok = true; 2415 // Check all the operands to see if they can be represented in the 2416 // source type of the truncate. 2417 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2418 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2419 if (T->getOperand()->getType() != SrcType) { 2420 Ok = false; 2421 break; 2422 } 2423 LargeOps.push_back(T->getOperand()); 2424 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2425 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2426 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2427 SmallVector<const SCEV *, 8> LargeMulOps; 2428 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2429 if (const SCEVTruncateExpr *T = 2430 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2431 if (T->getOperand()->getType() != SrcType) { 2432 Ok = false; 2433 break; 2434 } 2435 LargeMulOps.push_back(T->getOperand()); 2436 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2437 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2438 } else { 2439 Ok = false; 2440 break; 2441 } 2442 } 2443 if (Ok) 2444 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2445 } else { 2446 Ok = false; 2447 break; 2448 } 2449 } 2450 if (Ok) { 2451 // Evaluate the expression in the larger type. 2452 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2453 // If it folds to something simple, use it. Otherwise, don't. 2454 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2455 return getTruncateExpr(Fold, Ty); 2456 } 2457 } 2458 2459 // Skip past any other cast SCEVs. 2460 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2461 ++Idx; 2462 2463 // If there are add operands, they would be next.
2464 if (Idx < Ops.size()) { 2465 bool DeletedAdd = false; 2466 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2467 if (Ops.size() > AddOpsInlineThreshold || 2468 Add->getNumOperands() > AddOpsInlineThreshold) 2469 break; 2470 // If we have an add, expand the add operands onto the end of the operands 2471 // list. 2472 Ops.erase(Ops.begin()+Idx); 2473 Ops.append(Add->op_begin(), Add->op_end()); 2474 DeletedAdd = true; 2475 } 2476 2477 // If we deleted at least one add, we added operands to the end of the list, 2478 // and they are not necessarily sorted. Recurse to resort and resimplify 2479 // any operands we just acquired. 2480 if (DeletedAdd) 2481 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2482 } 2483 2484 // Skip over the add expression until we get to a multiply. 2485 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2486 ++Idx; 2487 2488 // Check to see if there are any folding opportunities present with 2489 // operands multiplied by constant values. 2490 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2491 uint64_t BitWidth = getTypeSizeInBits(Ty); 2492 DenseMap<const SCEV *, APInt> M; 2493 SmallVector<const SCEV *, 8> NewOps; 2494 APInt AccumulatedConstant(BitWidth, 0); 2495 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2496 Ops.data(), Ops.size(), 2497 APInt(BitWidth, 1), *this)) { 2498 struct APIntCompare { 2499 bool operator()(const APInt &LHS, const APInt &RHS) const { 2500 return LHS.ult(RHS); 2501 } 2502 }; 2503 2504 // Some interesting folding opportunity is present, so it's worthwhile to 2505 // re-generate the operands list. Group the operands by constant scale, 2506 // to avoid multiplying by the same constant scale multiple times. 2507 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2508 for (const SCEV *NewOp : NewOps) 2509 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2510 // Re-generate the operands list. 2511 Ops.clear(); 2512 if (AccumulatedConstant != 0) 2513 Ops.push_back(getConstant(AccumulatedConstant)); 2514 for (auto &MulOp : MulOpLists) 2515 if (MulOp.first != 0) 2516 Ops.push_back(getMulExpr( 2517 getConstant(MulOp.first), 2518 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2519 SCEV::FlagAnyWrap, Depth + 1)); 2520 if (Ops.empty()) 2521 return getZero(Ty); 2522 if (Ops.size() == 1) 2523 return Ops[0]; 2524 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2525 } 2526 } 2527 2528 // If we are adding something to a multiply expression, make sure the 2529 // something is not already an operand of the multiply. If so, merge it into 2530 // the multiply. 2531 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2532 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2533 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2534 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2535 if (isa<SCEVConstant>(MulOpSCEV)) 2536 continue; 2537 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2538 if (MulOpSCEV == Ops[AddOp]) { 2539 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2540 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2541 if (Mul->getNumOperands() != 2) { 2542 // If the multiply has more than two operands, we must get the 2543 // Y*Z term.
2544 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2545 Mul->op_begin()+MulOp); 2546 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2547 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2548 } 2549 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2550 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2551 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2552 SCEV::FlagAnyWrap, Depth + 1); 2553 if (Ops.size() == 2) return OuterMul; 2554 if (AddOp < Idx) { 2555 Ops.erase(Ops.begin()+AddOp); 2556 Ops.erase(Ops.begin()+Idx-1); 2557 } else { 2558 Ops.erase(Ops.begin()+Idx); 2559 Ops.erase(Ops.begin()+AddOp-1); 2560 } 2561 Ops.push_back(OuterMul); 2562 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2563 } 2564 2565 // Check this multiply against other multiplies being added together. 2566 for (unsigned OtherMulIdx = Idx+1; 2567 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2568 ++OtherMulIdx) { 2569 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2570 // If MulOp occurs in OtherMul, we can fold the two multiplies 2571 // together. 2572 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2573 OMulOp != e; ++OMulOp) 2574 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2575 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2576 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2577 if (Mul->getNumOperands() != 2) { 2578 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2579 Mul->op_begin()+MulOp); 2580 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2581 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2582 } 2583 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2584 if (OtherMul->getNumOperands() != 2) { 2585 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2586 OtherMul->op_begin()+OMulOp); 2587 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2588 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2589 } 2590 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2591 const SCEV *InnerMulSum = 2592 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2593 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2594 SCEV::FlagAnyWrap, Depth + 1); 2595 if (Ops.size() == 2) return OuterMul; 2596 Ops.erase(Ops.begin()+Idx); 2597 Ops.erase(Ops.begin()+OtherMulIdx-1); 2598 Ops.push_back(OuterMul); 2599 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2600 } 2601 } 2602 } 2603 } 2604 2605 // If there are any add recurrences in the operands list, see if any other 2606 // added values are loop invariant. If so, we can fold them into the 2607 // recurrence. 2608 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2609 ++Idx; 2610 2611 // Scan over all recurrences, trying to fold loop invariants into them. 2612 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2613 // Scan all of the other operands to this add and add them to the vector if 2614 // they are loop invariant w.r.t. the recurrence. 2615 SmallVector<const SCEV *, 8> LIOps; 2616 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2617 const Loop *AddRecLoop = AddRec->getLoop(); 2618 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2619 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2620 LIOps.push_back(Ops[i]); 2621 Ops.erase(Ops.begin()+i); 2622 --i; --e; 2623 } 2624 2625 // If we found some loop invariants, fold them into the recurrence. 
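// (Illustrative: with n invariant in L, n + {Start,+,Step}<L> becomes
// {n+Start,+,Step}<L>; this is the NLI/LI rewrite spelled out below.)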
2626 if (!LIOps.empty()) { 2627 // Compute nowrap flags for the addition of the loop-invariant ops and 2628 // the addrec. Temporarily push it as an operand for that purpose. 2629 LIOps.push_back(AddRec); 2630 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2631 LIOps.pop_back(); 2632 2633 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2634 LIOps.push_back(AddRec->getStart()); 2635 2636 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2637 // This follows from the fact that the no-wrap flags on the outer add 2638 // expression are applicable on the 0th iteration, when the add recurrence 2639 // will be equal to its start value. 2640 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1); 2641 2642 // Build the new addrec. Propagate the NUW and NSW flags if both the 2643 // outer add and the inner addrec are guaranteed to have no overflow. 2644 // Always propagate NW. 2645 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2646 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2647 2648 // If all of the other operands were loop invariant, we are done. 2649 if (Ops.size() == 1) return NewRec; 2650 2651 // Otherwise, add the folded AddRec by the non-invariant parts. 2652 for (unsigned i = 0;; ++i) 2653 if (Ops[i] == AddRec) { 2654 Ops[i] = NewRec; 2655 break; 2656 } 2657 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2658 } 2659 2660 // Okay, if there weren't any loop invariants to be folded, check to see if 2661 // there are multiple AddRec's with the same loop induction variable being 2662 // added together. If so, we can fold them. 2663 for (unsigned OtherIdx = Idx+1; 2664 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2665 ++OtherIdx) { 2666 // We expect the AddRecExpr's to be sorted in reverse dominance order, 2667 // so that the 1st found AddRecExpr is dominated by all others. 2668 assert(DT.dominates( 2669 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2670 AddRec->getLoop()->getHeader()) && 2671 "AddRecExprs are not sorted in reverse dominance order?"); 2672 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2673 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2674 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2675 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2676 ++OtherIdx) { 2677 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2678 if (OtherAddRec->getLoop() == AddRecLoop) { 2679 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2680 i != e; ++i) { 2681 if (i >= AddRecOps.size()) { 2682 AddRecOps.append(OtherAddRec->op_begin()+i, 2683 OtherAddRec->op_end()); 2684 break; 2685 } 2686 SmallVector<const SCEV *, 2> TwoOps = { 2687 AddRecOps[i], OtherAddRec->getOperand(i)}; 2688 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2689 } 2690 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2691 } 2692 } 2693 // Step size has changed, so we cannot guarantee no self-wraparound. 2694 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2695 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2696 } 2697 } 2698 2699 // Otherwise couldn't fold anything into this recurrence. Move onto the 2700 // next one. 2701 } 2702 2703 // Okay, it looks like we really DO need an add expr. Check to see if we 2704 // already have one, otherwise create a new one. 
2705 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2706 } 2707 2708 const SCEV * 2709 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2710 SCEV::NoWrapFlags Flags) { 2711 FoldingSetNodeID ID; 2712 ID.AddInteger(scAddExpr); 2713 for (const SCEV *Op : Ops) 2714 ID.AddPointer(Op); 2715 void *IP = nullptr; 2716 SCEVAddExpr *S = 2717 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2718 if (!S) { 2719 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2720 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2721 S = new (SCEVAllocator) 2722 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2723 UniqueSCEVs.InsertNode(S, IP); 2724 addToLoopUseLists(S); 2725 } 2726 S->setNoWrapFlags(Flags); 2727 return S; 2728 } 2729 2730 const SCEV * 2731 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2732 const Loop *L, SCEV::NoWrapFlags Flags) { 2733 FoldingSetNodeID ID; 2734 ID.AddInteger(scAddRecExpr); 2735 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2736 ID.AddPointer(Ops[i]); 2737 ID.AddPointer(L); 2738 void *IP = nullptr; 2739 SCEVAddRecExpr *S = 2740 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2741 if (!S) { 2742 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2743 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2744 S = new (SCEVAllocator) 2745 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2746 UniqueSCEVs.InsertNode(S, IP); 2747 addToLoopUseLists(S); 2748 } 2749 setNoWrapFlags(S, Flags); 2750 return S; 2751 } 2752 2753 const SCEV * 2754 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2755 SCEV::NoWrapFlags Flags) { 2756 FoldingSetNodeID ID; 2757 ID.AddInteger(scMulExpr); 2758 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2759 ID.AddPointer(Ops[i]); 2760 void *IP = nullptr; 2761 SCEVMulExpr *S = 2762 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2763 if (!S) { 2764 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2765 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2766 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2767 O, Ops.size()); 2768 UniqueSCEVs.InsertNode(S, IP); 2769 addToLoopUseLists(S); 2770 } 2771 S->setNoWrapFlags(Flags); 2772 return S; 2773 } 2774 2775 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2776 uint64_t k = i*j; 2777 if (j > 1 && k / j != i) Overflow = true; 2778 return k; 2779 } 2780 2781 /// Compute the result of "n choose k", the binomial coefficient. If an 2782 /// intermediate computation overflows, Overflow will be set and the return 2783 /// will be garbage. Overflow is not cleared on absence of overflow. 2784 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 2785 // We use the multiplicative formula: 2786 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 2787 // At each iteration, we take the n-th term of the numerator and divide by the 2788 // (k-n)th term of the denominator. This division will always produce an 2789 // integral result, and helps reduce the chance of overflow in the 2790 // intermediate computations. However, we can still overflow even when the 2791 // final result would fit.
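// Worked example (illustrative): Choose(5, 2) keeps k == 2 since k <= n/2
// already, then iterates r = (1*5)/1 = 5 and r = (5*4)/2 = 10, the
// expected C(5,2); umul_ov flags any intermediate 64-bit overflow.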
2792 2793 if (n == 0 || n == k) return 1; 2794 if (k > n) return 0; 2795 2796 if (k > n/2) 2797 k = n-k; 2798 2799 uint64_t r = 1; 2800 for (uint64_t i = 1; i <= k; ++i) { 2801 r = umul_ov(r, n-(i-1), Overflow); 2802 r /= i; 2803 } 2804 return r; 2805 } 2806 2807 /// Determine if any of the operands in this SCEV are a constant or if 2808 /// any of the add or multiply expressions in this SCEV contain a constant. 2809 static bool containsConstantInAddMulChain(const SCEV *StartExpr) { 2810 struct FindConstantInAddMulChain { 2811 bool FoundConstant = false; 2812 2813 bool follow(const SCEV *S) { 2814 FoundConstant |= isa<SCEVConstant>(S); 2815 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S); 2816 } 2817 2818 bool isDone() const { 2819 return FoundConstant; 2820 } 2821 }; 2822 2823 FindConstantInAddMulChain F; 2824 SCEVTraversal<FindConstantInAddMulChain> ST(F); 2825 ST.visitAll(StartExpr); 2826 return F.FoundConstant; 2827 } 2828 2829 /// Get a canonical multiply expression, or something simpler if possible. 2830 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 2831 SCEV::NoWrapFlags OrigFlags, 2832 unsigned Depth) { 2833 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) && 2834 "only nuw or nsw allowed"); 2835 assert(!Ops.empty() && "Cannot get empty mul!"); 2836 if (Ops.size() == 1) return Ops[0]; 2837 #ifndef NDEBUG 2838 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2839 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2840 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2841 "SCEVMulExpr operand types don't match!"); 2842 #endif 2843 2844 // Sort by complexity, this groups all similar expression types together. 2845 GroupByComplexity(Ops, &LI, DT); 2846 2847 // If there are any constants, fold them together. 2848 unsigned Idx = 0; 2849 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2850 ++Idx; 2851 assert(Idx < Ops.size()); 2852 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2853 // We found two constants, fold them together! 2854 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt()); 2855 if (Ops.size() == 2) return Ops[0]; 2856 Ops.erase(Ops.begin()+1); // Erase the folded element 2857 LHSC = cast<SCEVConstant>(Ops[0]); 2858 } 2859 2860 // If we have a multiply of zero, it will always be zero. 2861 if (LHSC->getValue()->isZero()) 2862 return LHSC; 2863 2864 // If we are left with a constant one being multiplied, strip it off. 2865 if (LHSC->getValue()->isOne()) { 2866 Ops.erase(Ops.begin()); 2867 --Idx; 2868 } 2869 2870 if (Ops.size() == 1) 2871 return Ops[0]; 2872 } 2873 2874 // Delay expensive flag strengthening until necessary. 2875 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2876 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags); 2877 }; 2878 2879 // Limit recursion calls depth. 2880 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2881 return getOrCreateMulExpr(Ops, ComputeFlags(Ops)); 2882 2883 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) { 2884 // Don't strengthen flags if we have no new information. 
2885 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S); 2886 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags) 2887 Mul->setNoWrapFlags(ComputeFlags(Ops)); 2888 return S; 2889 } 2890 2891 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2892 if (Ops.size() == 2) { 2893 // C1*(C2+V) -> C1*C2 + C1*V 2894 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 2895 // If any of Add's ops are Adds or Muls with a constant, apply this 2896 // transformation as well. 2897 // 2898 // TODO: There are some cases where this transformation is not 2899 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of 2900 // this transformation should be narrowed down. 2901 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) 2902 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0), 2903 SCEV::FlagAnyWrap, Depth + 1), 2904 getMulExpr(LHSC, Add->getOperand(1), 2905 SCEV::FlagAnyWrap, Depth + 1), 2906 SCEV::FlagAnyWrap, Depth + 1); 2907 2908 if (Ops[0]->isAllOnesValue()) { 2909 // If we have a mul by -1 of an add, try distributing the -1 among the 2910 // add operands. 2911 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 2912 SmallVector<const SCEV *, 4> NewOps; 2913 bool AnyFolded = false; 2914 for (const SCEV *AddOp : Add->operands()) { 2915 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap, 2916 Depth + 1); 2917 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 2918 NewOps.push_back(Mul); 2919 } 2920 if (AnyFolded) 2921 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1); 2922 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) { 2923 // Negation preserves a recurrence's no self-wrap property. 2924 SmallVector<const SCEV *, 4> Operands; 2925 for (const SCEV *AddRecOp : AddRec->operands()) 2926 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap, 2927 Depth + 1)); 2928 2929 return getAddRecExpr(Operands, AddRec->getLoop(), 2930 AddRec->getNoWrapFlags(SCEV::FlagNW)); 2931 } 2932 } 2933 } 2934 } 2935 2936 // Skip over the add expression until we get to a multiply. 2937 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2938 ++Idx; 2939 2940 // If there are mul operands, inline them all into this expression. 2941 if (Idx < Ops.size()) { 2942 bool DeletedMul = false; 2943 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2944 if (Ops.size() > MulOpsInlineThreshold) 2945 break; 2946 // If we have a mul, expand the mul operands onto the end of the 2947 // operands list. 2948 Ops.erase(Ops.begin()+Idx); 2949 Ops.append(Mul->op_begin(), Mul->op_end()); 2950 DeletedMul = true; 2951 } 2952 2953 // If we deleted at least one mul, we added operands to the end of the 2954 // list, and they are not necessarily sorted. Recurse to resort and 2955 // resimplify any operands we just acquired. 2956 if (DeletedMul) 2957 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2958 } 2959 2960 // If there are any add recurrences in the operands list, see if any other 2961 // added values are loop invariant. If so, we can fold them into the 2962 // recurrence. 2963 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2964 ++Idx; 2965 2966 // Scan over all recurrences, trying to fold loop invariants into them. 2967 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2968 // Scan all of the other operands to this mul and add them to the vector 2969 // if they are loop invariant w.r.t. the recurrence.
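// (Illustrative: with 3 invariant in L, 3 * {2,+,4}<L> distributes to
// {6,+,12}<L>, the NLI * {LI*Start,+,LI*Step} rewrite performed below.)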
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
      const SCEV *NewRec = getAddRecExpr(
          NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together. If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //    ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence. Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr. Check to see if we
  // already have one; otherwise create a new one.
  return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
}

/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If the constant is one, the result is trivial.
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If the constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }

  // Fall back to %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y).
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible.
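/// A few of the simplifications attempted below, on illustrative inputs
/// (the concrete expressions are assumed, not taken from the source):
///   (X udiv 1)           --> X
///   ({0,+,4}<L> udiv 2)  --> {0,+,2}<L>, when zero-extension proves the
///                            per-operand division loses no bits
///   ((A /u B) /u C)      --> A /u (B*C), when B*C does not overflow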
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                            // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of the LHS.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence.
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0) {
              const SCEV *NewLHS =
                  getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                AR->getLoop(), SCEV::FlagNW);
              if (LHS != NewLHS) {
                LHS = NewLHS;

                // Reset the ID to include the new LHS, and check if it is
                // already cached.
                ID.clear();
                ID.AddInteger(scUDivExpr);
                ID.AddPointer(LHS);
                ID.AddPointer(RHS);
                IP = nullptr;
                if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
                  return S;
              }
            }
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
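      // E.g. (illustrative): (6 * %x) /u 3 becomes 2 * %x, since 6 /u 3
      // folds cleanly and the zero-extend check below shows no bits are
      // discarded.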
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->operands());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
  // changes). Make sure we get a new one.
  IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}

/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant).
  // See SCEVDivision towards the end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly; it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}

/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
  // use that information to infer NUW and NSW flags.
  // However, computing a BE count requires calling getAddRecExpr, so we may
  // not yet have a meaningful BE count at this point (and if we don't, we'd
  // be stuck with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands to be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one; otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}

const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags OffsetWrap =
      GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  Type *CurTy = GEP->getType();
  bool FirstIter = true;
  SmallVector<const SCEV *, 4> Offsets;
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
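      // E.g. (illustrative IR): for getelementptr {i32, i64}, ptr %p,
      // i64 0, i32 1, the second index selects field 1, contributing that
      // field's byte offset from the DataLayout's struct layout (8 with
      // typical alignment).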
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
      Offsets.push_back(FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      if (FirstIter) {
        assert(isa<PointerType>(CurTy) &&
               "The first index of a GEP indexes a pointer");
        CurTy = GEP->getSourceElementType();
        FirstIter = false;
      } else {
        CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
      }
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
      Offsets.push_back(LocalOffset);
    }
  }

  // Handle the degenerate case of a GEP without offsets.
  if (Offsets.empty())
    return BaseExpr;

  // Add the offsets together, assuming nsw if inbounds.
  const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
  // Add the base address and the offset. We cannot use the nsw flag, as the
  // base address is unsigned. However, if we know that the offset is
  // non-negative, we can use nuw.
  SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset)
                                   ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
  return getAddExpr(BaseExpr, Offset, BaseWrap);
}

std::tuple<SCEV *, FoldingSetNodeID, void *>
ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
                                         ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  void *IP = nullptr;
  ID.AddInteger(SCEVType);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  return std::tuple<SCEV *, FoldingSetNodeID, void *>(
      UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
}

const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
  SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
  return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
}

const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  // Sort by complexity; this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // Check if we have created the same expression before.
  if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
    return S;
  }

  // If there are any constants, fold them together.
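  // For instance (illustrative): umin(4, 7, %x) folds its two constant
  // operands through FoldOp below, leaving umin(4, %x).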
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
      if (Kind == scSMaxExpr)
        return APIntOps::smax(LHS, RHS);
      else if (Kind == scSMinExpr)
        return APIntOps::smin(LHS, RHS);
      else if (Kind == scUMaxExpr)
        return APIntOps::umax(LHS, RHS);
      else if (Kind == scUMinExpr)
        return APIntOps::umin(LHS, RHS);
      llvm_unreachable("Unknown SCEV min/max opcode");
    };

    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
      // If we are left with a constant minimum(/maximum)-int, strip it off.
      Ops.erase(Ops.begin());
      --Idx;
    } else if (IsMax ? IsMaxV : IsMinV) {
      // If we have a max(/min) with a constant maximum(/minimum)-int,
      // it will always be the extremum.
      return LHSC;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first operation of the same kind.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMME->op_begin(), SMME->op_end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y  -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an expr. Check to see if we
  // already have one; otherwise create a new one.
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator)
      SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}

const SCEV *
ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
                                             ScalableVectorType *ScalableTy) {
  Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
  Constant *One = ConstantInt::get(IntTy, 1);
  Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
  // Note that the expression we created is the final expression; we don't
  // want to simplify it any further. Also, if we call a normal getSCEV(),
  // we'll end up in an endless recursion. So just create an SCEVUnknown.
  return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
}

const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
  if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
    return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
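  // E.g. (illustrative): for x86_fp80, the store size is 10 bytes while the
  // alloc size used by getSizeOfExpr above is 12 or 16 depending on the
  // target's alignment, which is why store size gets its own getter.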
  return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent constant expression and then
  // folding it back into a ConstantInt. This is just a compile-time
  // optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer index sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIndexType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}

bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec =
      SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}

/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S].
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped].
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}

/// Check whether value has nuw/nsw/exact set but SCEV does not.
/// TODO: In reality it is better to check the poison recursively
/// but this is better than nothing.
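/// For example (illustrative): if V is "add nsw i32 %a, %b" but the SCEV
/// computed for V carries no nsw flag, the poison guarantee was lost, and
/// caching V for reuse during expansion could be unsound.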
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}

/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check whether V->S is inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is a SCEVUnknown, don't bother to save
      // Stripped -> {V, Offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is a GetElementPtrInst, don't save Stripped -> {V, Offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
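      // E.g. (illustrative): for S == (4 + %y), Stripped is %y and Offset
      // is 4, recording that V can stand in for %y plus a constant 4.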
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}

/// Return a SCEV corresponding to -V = -1*V.
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V, getMinusOne(Ty), Flags);
}

/// If Expr computes ~A, return A, else return nullptr.
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}

/// Return a SCEV corresponding to ~V = -1-V.
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y).
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
                           MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMinusSCEV(getMinusOne(Ty), V);
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) {
      V = Cast->getOperand();
    } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *NAryOp : NAry->operands()) {
        if (NAryOp->getType()->isPointerTy()) {
          // Cannot find the base of an expression with multiple pointer ops.
          if (PtrOp)
            return V;
          PtrOp = NAryOp;
        }
      }
      if (!PtrOp) // All operands were non-pointer.
        return V;
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}

/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}

void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its start expression. If the loop is not L and IgnoreOtherLoops is
/// true, use the AddRec itself; otherwise the rewrite cannot be done.
/// If the SCEV contains a non-invariant unknown SCEV, the rewrite cannot be
/// done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its post-increment expression; otherwise use the AddRec itself.
/// If the SCEV contains a non-invariant unknown SCEV, the rewrite cannot be
/// done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
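    // E.g. (illustrative): {0,+,1}<L> becomes its post-increment form
    // {1,+,1}<L>, the value the recurrence takes after one more trip.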
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match, we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition for the loop latch, then
  // return a constant evolution node based on the loopback branch being
  // taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop.

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}
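
// Illustrative example: for AR = {0,+,1}<%L> over i8 the unsigned maximum of
// the step is 1, so N = 0 - 1 = 255. If the backedge is guarded by
// "icmp ult i8 %iv, 100", then AR <u 255 holds whenever the backedge is
// taken, the increment cannot wrap unsigned, and FlagNUW is added.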

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right by a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);
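
  // For example, "lshr i32 %x, 4" maps to the BinaryOp (UDiv, %x, 16), and
  // "xor i8 %a, -128" (a sign-mask xor) maps to (Add, %a, -128); both follow
  // directly from the strength reductions handled above.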

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise the loop.decrement.reg intrinsic; as it has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms to one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
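
// For example, with an i64 %SymbolicPHI, the operand
//   (sext i32 (trunc i64 %SymbolicPHI to i32) to i64)
// matches the first pattern above: the returned truncation type is i32 and
// Signed is set to true. With zext instead of sext, Signed would be false.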

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //            = Start + (i+1)*Accum
  //            = (Start + i*Accum) + Accum
  //            = Expr(i) + Accum
  //            = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                           :: from step i
  //
  //            = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //              + Accum + Accum
  //
  //            = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //              + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //              + Accum                                      :: from P3
  //
  //            = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //              + Accum                :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //            = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //            = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter does not
// rewrite this expression:
//   {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
//   "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}
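
// Illustrative example (hypothetical IR): a header phi such as
//   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add nuw nsw i64 %iv, 1
// is handled here directly, yielding {0,+,1}<nuw><nsw><%loop> without going
// through the symbolic-name machinery in createAddRecFromPHI below.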

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary
  // as it will prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}
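
// Illustrative example (hypothetical IR): a pointer induction such as
//   %p = phi i32* [ %base, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr inbounds i32, i32* %p, i64 1
// takes the GEP branch above: inbounds gives FlagNW, and because the GEP is
// known to advance the pointer by a positive amount (4 bytes per iteration),
// FlagNUW is added as well, yielding {%base,+,4}<nuw><%loop>.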

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}
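
// Illustrative example (hypothetical IR):
//   %cmp = icmp sgt i32 %a, %b
//   %sel = select i1 %cmp, i32 %a, i32 %b
// Here LA - LS and RA - RS are both zero, so the select above is mapped to
// the SCEV (%a smax %b); with the select operands swapped it would be mapped
// to (%a smin %b).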

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
    return GetMinTrailingZeros(I->getOperand());

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}
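
// Illustrative example: for the SCEV (4 * %x) + (8 * %y), the mul operands
// contribute at least 2 and 3 trailing zero bits respectively, and the add
// takes the minimum of its operands' results, so at least min(2, 3) = 2
// trailing zeros are known for the whole expression.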

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
                                     SCEV::NoWrapFlags Flags) {
  if (AddRec->getNoWrapFlags(Flags) != Flags) {
    AddRec->setNoWrapFlags(Flags);
    UnsignedRanges.erase(AddRec);
    SignedRanges.erase(AddRec);
  }
}

ConstantRange ScalarEvolution::
getRangeForUnknownRecurrence(const SCEVUnknown *U) {
  const DataLayout &DL = getDataLayout();

  unsigned BitWidth = getTypeSizeInBits(U->getType());
  ConstantRange CR(BitWidth, /*isFullSet=*/true);

  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
  // use information about the trip count to improve our available range.  Note
  // that the trip count independent cases are already handled by known bits.
  // WARNING: The definition of recurrence used here is subtly different than
  // the one used by AddRec (and thus most of this file).  Step is allowed to
  // be arbitrarily loop varying here, where AddRec allows only loop invariant
  // and other addrecs in the same loop (for non-affine addrecs).  The code
  // below intentionally handles the case where step is not loop invariant.
  auto *P = dyn_cast<PHINode>(U->getValue());
  if (!P)
    return CR;

  // Make sure that no Phi input comes from an unreachable block.  Otherwise,
  // even the values that are not available in these blocks may come from them,
  // and this leads to a false-positive recurrence test.
  for (auto *Pred : predecessors(P->getParent()))
    if (!DT.isReachableFromEntry(Pred))
      return CR;

  BinaryOperator *BO;
  Value *Start, *Step;
  if (!matchSimpleRecurrence(P, BO, Start, Step))
    return CR;

  // If we found a recurrence in reachable code, we must be in a loop. Note
  // that BO might be in some subloop of L, and that's completely okay.
  auto *L = LI.getLoopFor(P->getParent());
  assert(L && L->getHeader() == P->getParent());
  if (!L->contains(BO->getParent()))
    // NOTE: This bailout should be an assert instead. However, asserting
    // the condition here exposes a case where LoopFusion is querying SCEV
    // with malformed loop information during the midst of the transform.
    // There doesn't appear to be an obvious fix, so for the moment bail
    // out until the caller issue can be fixed.  PR49566 tracks the bug.
    return CR;

  // TODO: Extend to other opcodes such as ashr, mul, and div
  switch (BO->getOpcode()) {
  default:
    return CR;
  case Instruction::Shl:
    break;
  }

  if (BO->getOperand(0) != P)
    // TODO: Handle the power function forms some day.
    return CR;

  unsigned TC = getSmallConstantMaxTripCount(L);
  if (!TC || TC >= BitWidth)
    return CR;

  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
  assert(KnownStart.getBitWidth() == BitWidth &&
         KnownStep.getBitWidth() == BitWidth);

  // Compute total shift amount, being careful of overflow and bitwidths.
  auto MaxShiftAmt = KnownStep.getMaxValue();
  APInt TCAP(BitWidth, TC - 1);
  bool Overflow = false;
  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
  if (Overflow)
    return CR;

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("filtered out above");
  case Instruction::Shl: {
    // Iff no bits are shifted out, the value increases on every shift.
    auto KnownEnd = KnownBits::shl(KnownStart,
                                   KnownBits::makeConstant(TotalShift));
    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
      CR = CR.intersectWith(ConstantRange(KnownStart.getMinValue(),
                                          KnownEnd.getMaxValue() + 1));
    break;
  }
  }
  return CR;
}
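
// Illustrative example (hypothetical IR): for the shift recurrence
//   %p = phi i8 [ 1, %entry ], [ %shl, %loop ]
//   %shl = shl i8 %p, 1
// with a constant maximum trip count of 4, the total shift amount is at most
// 3; no bits of the start value are shifted out, so the range is narrowed to
// [1, 9), i.e. the value is always one of 1, 2, 4 or 8.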

/// Determine the range for a particular SCEV.  If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  using OBO = OverflowingBinaryOperator;

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    unsigned WrapType = OBO::AnyWrap;
    if (Add->hasNoSignedWrap())
      WrapType |= OBO::NoSignedWrap;
    if (Add->hasNoUnsignedWrap())
      WrapType |= OBO::NoUnsignedWrap;
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
                          WrapType, RangeType);
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
    ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
      X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
    return setRange(SMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
    ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
      X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
    return setRange(UMin, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
    ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
    return setRange(PtrToInt, SignHint, X);
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap()) {
      APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
      if (!UnsignedMinValue.isNullValue())
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
    }

    // If there's no signed wrap, and all the operands other than the initial
    // value have the same sign or are zero, the value won't ever be:
    // 1: smaller than the initial value if the operands are non-negative,
    // 2: bigger than the initial value if the operands are non-positive.
    // In both cases, the value cannot cross the signed min/max boundary.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i)))
          AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i)))
          AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
                                       APInt::getSignedMinValue(BitWidth)),
            RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange::getNonEmpty(
                APInt::getSignedMinValue(BitWidth),
                getSignedRangeMax(AddRec->getStart()) + 1),
            RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount =
          getConstantMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }
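
      // Illustrative example: for {0,+,1}<%L> over i8 with a constant
      // maximum backedge-taken count of 9, getRangeForAffineAR yields
      // [0, 10), since the value starts at 0 and can grow by at most 1 on
      // each of the at most 9 backedge-taken iterations.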
      if (UseExpensiveRangeSharpening) {
        const SCEV *SymbolicMaxBECount =
            getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
        if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
            getTypeSizeInBits(SymbolicMaxBECount->getType()) <= BitWidth &&
            AddRec->hasNoSelfWrap()) {
          auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
              AddRec, SymbolicMaxBECount, BitWidth, SignHint);
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
        }
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {

    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Use facts about recurrences in the underlying IR. Note that add
    // recurrences are AddRecExprs and thus don't hit this path. This
    // primarily handles shift recurrences.
    auto CR = getRangeForUnknownRecurrence(U);
    ConservativeResult = ConservativeResult.intersectWith(CR);

    // See if ValueTracking can give us a useful range.
    const DataLayout &DL = getDataLayout();
    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (Known.getBitWidth() != BitWidth)
      Known = Known.zextOrTrunc(BitWidth);

    // ValueTracking may be able to compute a tighter result for the number of
    // sign bits than for the value of those sign bits.
    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
    if (U->getType()->isPointerTy()) {
      // If the pointer size is larger than the index type size, this can
      // cause NS to be larger than BitWidth. Compensate for this.
      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
      int ptrIdxDiff = ptrSize - BitWidth;
      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
        NS -= ptrIdxDiff;
    }

    if (NS > 1) {
      // If we know any of the sign bits, we know all of the sign bits.
      if (!Known.Zero.getHiBits(NS).isNullValue())
        Known.Zero.setHighBits(NS);
      if (!Known.One.getHiBits(NS).isNullValue())
        Known.One.setHighBits(NS);
    }

    if (Known.getMinValue() != Known.getMaxValue() + 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
          RangeType);
    if (NS > 1)
      ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
          RangeType);

    // The range of a Phi is a subset of the union of the ranges of its
    // inputs.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not recurse forever on cyclic Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point in continuing if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void)Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}

// Given a StartRange, Step and MaxBECount for an expression, compute a range
// of values that the expression can take. Initially, the expression has a
// value from StartRange and then is changed by Step up to MaxBECount times.
// The Signed argument defines whether we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    //   abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold due to the well-defined wrap-around behavior of
    // APInt.
    Step = Step.abs();

  // Check if Offset would be more than the full span of BitWidth. If it
  // would, the expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. The checks above
  // guarantee no overflow here.
  APInt Offset = Step * MaxBECount;

  // The minimum value of the final range will match the minimum value of
  // StartRange if the expression is increasing, and will be decreased by
  // Offset otherwise. The maximum value of the final range will match the
  // maximum value of StartRange if the expression is decreasing, and will be
  // increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap-around). This means that the expression can
  // take any value in this bitwidth, and we have to return the full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return the [NewLower, NewUpper) range.
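  // For example (illustrative, unsigned case): with Step = 2,
  // StartRange = [10, 20) and MaxBECount = 3, Offset is 6 and the moved
  // boundary is 19 + 6 = 25. Since 25 does not wrap back into [10, 20),
  // the result below is [10, 26).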
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider the step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider the step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect the signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with a constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know the max exit count (or from other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take the Start and End values
  // and look at all intermediate values V1, V2, ..., Vn that IndVar takes
  // during the iteration.
  // They either lie inside the range [Min(Start, End), Max(Start, End)] or
  // outside it:
  //
  // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
  // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
  //
  // The no-self-wrap flag guarantees that the intermediate values cannot be
  // BOTH outside and inside the range [Min(Start, End), Max(Start, End)].
  // Using that knowledge, let's try to prove that we are dealing with Case 1.
  // This is so if Start <= End and the step is positive, or Start >= End and
  // the step is negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover the full iteration space, we will learn nothing
  // useful even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
                               : RangeBetween.isWrappedSet();
  if (IsWrappedSet)
    return ConstantRange::getFull(BitWidth);

  if (isKnownPositive(Step) &&
      isKnownPredicateViaConstantRanges(LEPred, Start, End))
    return RangeBetween;
  else if (isKnownNegative(Step) &&
           isKnownPredicateViaConstantRanges(GEPred, Start, End))
    return RangeBetween;
  return ConstantRange::getFull(BitWidth);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
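        // For instance (illustrative): S = (7 + %sel), where %sel is
        // 'select %c, i32 1, i32 9', is decomposed into Offset = 7 and the
        // select matched below; the TrueValue/FalseValue of 1 and 9 are
        // re-offset to 8 and 16 at the end of this constructor.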
6207 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6208 return; 6209 6210 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6211 S = SA->getOperand(1); 6212 } 6213 6214 // Peel off a cast operation 6215 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6216 CastOp = SCast->getSCEVType(); 6217 S = SCast->getOperand(); 6218 } 6219 6220 using namespace llvm::PatternMatch; 6221 6222 auto *SU = dyn_cast<SCEVUnknown>(S); 6223 const APInt *TrueVal, *FalseVal; 6224 if (!SU || 6225 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6226 m_APInt(FalseVal)))) { 6227 Condition = nullptr; 6228 return; 6229 } 6230 6231 TrueValue = *TrueVal; 6232 FalseValue = *FalseVal; 6233 6234 // Re-apply the cast we peeled off earlier 6235 if (CastOp.hasValue()) 6236 switch (*CastOp) { 6237 default: 6238 llvm_unreachable("Unknown SCEV cast type!"); 6239 6240 case scTruncate: 6241 TrueValue = TrueValue.trunc(BitWidth); 6242 FalseValue = FalseValue.trunc(BitWidth); 6243 break; 6244 case scZeroExtend: 6245 TrueValue = TrueValue.zext(BitWidth); 6246 FalseValue = FalseValue.zext(BitWidth); 6247 break; 6248 case scSignExtend: 6249 TrueValue = TrueValue.sext(BitWidth); 6250 FalseValue = FalseValue.sext(BitWidth); 6251 break; 6252 } 6253 6254 // Re-apply the constant offset we peeled off earlier 6255 TrueValue += Offset; 6256 FalseValue += Offset; 6257 } 6258 6259 bool isRecognized() { return Condition != nullptr; } 6260 }; 6261 6262 SelectPattern StartPattern(*this, BitWidth, Start); 6263 if (!StartPattern.isRecognized()) 6264 return ConstantRange::getFull(BitWidth); 6265 6266 SelectPattern StepPattern(*this, BitWidth, Step); 6267 if (!StepPattern.isRecognized()) 6268 return ConstantRange::getFull(BitWidth); 6269 6270 if (StartPattern.Condition != StepPattern.Condition) { 6271 // We don't handle this case today; but we could, by considering four 6272 // possibilities below instead of two. I'm not sure if there are cases where 6273 // that will help over what getRange already does, though. 6274 return ConstantRange::getFull(BitWidth); 6275 } 6276 6277 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6278 // construct arbitrary general SCEV expressions here. This function is called 6279 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6280 // say) can end up caching a suboptimal value. 6281 6282 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6283 // C2352 and C2512 (otherwise it isn't needed). 6284 6285 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6286 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6287 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6288 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6289 6290 ConstantRange TrueRange = 6291 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6292 ConstantRange FalseRange = 6293 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6294 6295 return TrueRange.unionWith(FalseRange); 6296 } 6297 6298 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6299 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6300 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6301 6302 // Return early if there are no flags to propagate to the SCEV. 
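  // For example (illustrative): for '%x = add nuw nsw i32 %a, %b', the flags
  // computed below are NUW | NSW, but they are only returned if
  // isSCEVExprNeverPoison(%x) holds; otherwise we conservatively fall back to
  // FlagAnyWrap.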
6303 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6304 if (BinOp->hasNoUnsignedWrap()) 6305 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6306 if (BinOp->hasNoSignedWrap()) 6307 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6308 if (Flags == SCEV::FlagAnyWrap) 6309 return SCEV::FlagAnyWrap; 6310 6311 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6312 } 6313 6314 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) { 6315 // Here we check that I is in the header of the innermost loop containing I, 6316 // since we only deal with instructions in the loop header. The actual loop we 6317 // need to check later will come from an add recurrence, but getting that 6318 // requires computing the SCEV of the operands, which can be expensive. This 6319 // check we can do cheaply to rule out some cases early. 6320 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent()); 6321 if (InnermostContainingLoop == nullptr || 6322 InnermostContainingLoop->getHeader() != I->getParent()) 6323 return false; 6324 6325 // Only proceed if we can prove that I does not yield poison. 6326 if (!programUndefinedIfPoison(I)) 6327 return false; 6328 6329 // At this point we know that if I is executed, then it does not wrap 6330 // according to at least one of NSW or NUW. If I is not executed, then we do 6331 // not know if the calculation that I represents would wrap. Multiple 6332 // instructions can map to the same SCEV. If we apply NSW or NUW from I to 6333 // the SCEV, we must guarantee no wrapping for that SCEV also when it is 6334 // derived from other instructions that map to the same SCEV. We cannot make 6335 // that guarantee for cases where I is not executed. So we need to find the 6336 // loop that I is considered in relation to and prove that I is executed for 6337 // every iteration of that loop. That implies that the value that I 6338 // calculates does not wrap anywhere in the loop, so then we can apply the 6339 // flags to the SCEV. 6340 // 6341 // We check isLoopInvariant to disambiguate in case we are adding recurrences 6342 // from different loops, so that we know which loop to prove that I is 6343 // executed in. 6344 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) { 6345 // I could be an extractvalue from a call to an overflow intrinsic. 6346 // TODO: We can do better here in some cases. 6347 if (!isSCEVable(I->getOperand(OpIndex)->getType())) 6348 return false; 6349 const SCEV *Op = getSCEV(I->getOperand(OpIndex)); 6350 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 6351 bool AllOtherOpsLoopInvariant = true; 6352 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands(); 6353 ++OtherOpIndex) { 6354 if (OtherOpIndex != OpIndex) { 6355 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex)); 6356 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) { 6357 AllOtherOpsLoopInvariant = false; 6358 break; 6359 } 6360 } 6361 } 6362 if (AllOtherOpsLoopInvariant && 6363 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop())) 6364 return true; 6365 } 6366 } 6367 return false; 6368 } 6369 6370 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) { 6371 // If we know that \c I can never be poison period, then that's enough. 
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison iteration K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
6456 } 6457 6458 auto InsertPair = LoopPropertiesCache.insert({L, LP}); 6459 assert(InsertPair.second && "We just checked!"); 6460 Itr = InsertPair.first; 6461 } 6462 6463 return Itr->second; 6464 } 6465 6466 const SCEV *ScalarEvolution::createSCEV(Value *V) { 6467 if (!isSCEVable(V->getType())) 6468 return getUnknown(V); 6469 6470 if (Instruction *I = dyn_cast<Instruction>(V)) { 6471 // Don't attempt to analyze instructions in blocks that aren't 6472 // reachable. Such instructions don't matter, and they aren't required 6473 // to obey basic rules for definitions dominating uses which this 6474 // analysis depends on. 6475 if (!DT.isReachableFromEntry(I->getParent())) 6476 return getUnknown(UndefValue::get(V->getType())); 6477 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 6478 return getConstant(CI); 6479 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 6480 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 6481 else if (!isa<ConstantExpr>(V)) 6482 return getUnknown(V); 6483 6484 Operator *U = cast<Operator>(V); 6485 if (auto BO = MatchBinaryOp(U, DT)) { 6486 switch (BO->Opcode) { 6487 case Instruction::Add: { 6488 // The simple thing to do would be to just call getSCEV on both operands 6489 // and call getAddExpr with the result. However if we're looking at a 6490 // bunch of things all added together, this can be quite inefficient, 6491 // because it leads to N-1 getAddExpr calls for N ultimate operands. 6492 // Instead, gather up all the operands and make a single getAddExpr call. 6493 // LLVM IR canonical form means we need only traverse the left operands. 6494 SmallVector<const SCEV *, 4> AddOps; 6495 do { 6496 if (BO->Op) { 6497 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6498 AddOps.push_back(OpSCEV); 6499 break; 6500 } 6501 6502 // If a NUW or NSW flag can be applied to the SCEV for this 6503 // addition, then compute the SCEV for this addition by itself 6504 // with a separate call to getAddExpr. We need to do that 6505 // instead of pushing the operands of the addition onto AddOps, 6506 // since the flags are only known to apply to this particular 6507 // addition - they may not apply to other additions that can be 6508 // formed with operands from AddOps. 
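          // For example (illustrative):
          //   %x = add nsw i32 %a, %b
          //   %y = add i32 %x, %c
          // The nsw flag applies only to (%a + %b), so we emit
          // (%a + %b)<nsw> as a single operand here rather than flattening
          // it into the flagless sum (%a + %b + %c).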
6509 const SCEV *RHS = getSCEV(BO->RHS); 6510 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6511 if (Flags != SCEV::FlagAnyWrap) { 6512 const SCEV *LHS = getSCEV(BO->LHS); 6513 if (BO->Opcode == Instruction::Sub) 6514 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 6515 else 6516 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 6517 break; 6518 } 6519 } 6520 6521 if (BO->Opcode == Instruction::Sub) 6522 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 6523 else 6524 AddOps.push_back(getSCEV(BO->RHS)); 6525 6526 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6527 if (!NewBO || (NewBO->Opcode != Instruction::Add && 6528 NewBO->Opcode != Instruction::Sub)) { 6529 AddOps.push_back(getSCEV(BO->LHS)); 6530 break; 6531 } 6532 BO = NewBO; 6533 } while (true); 6534 6535 return getAddExpr(AddOps); 6536 } 6537 6538 case Instruction::Mul: { 6539 SmallVector<const SCEV *, 4> MulOps; 6540 do { 6541 if (BO->Op) { 6542 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 6543 MulOps.push_back(OpSCEV); 6544 break; 6545 } 6546 6547 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 6548 if (Flags != SCEV::FlagAnyWrap) { 6549 MulOps.push_back( 6550 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags)); 6551 break; 6552 } 6553 } 6554 6555 MulOps.push_back(getSCEV(BO->RHS)); 6556 auto NewBO = MatchBinaryOp(BO->LHS, DT); 6557 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 6558 MulOps.push_back(getSCEV(BO->LHS)); 6559 break; 6560 } 6561 BO = NewBO; 6562 } while (true); 6563 6564 return getMulExpr(MulOps); 6565 } 6566 case Instruction::UDiv: 6567 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6568 case Instruction::URem: 6569 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS)); 6570 case Instruction::Sub: { 6571 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6572 if (BO->Op) 6573 Flags = getNoWrapFlagsFromUB(BO->Op); 6574 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags); 6575 } 6576 case Instruction::And: 6577 // For an expression like x&255 that merely masks off the high bits, 6578 // use zext(trunc(x)) as the SCEV expression. 6579 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6580 if (CI->isZero()) 6581 return getSCEV(BO->RHS); 6582 if (CI->isMinusOne()) 6583 return getSCEV(BO->LHS); 6584 const APInt &A = CI->getValue(); 6585 6586 // Instcombine's ShrinkDemandedConstant may strip bits out of 6587 // constants, obscuring what would otherwise be a low-bits mask. 6588 // Use computeKnownBits to compute what ShrinkDemandedConstant 6589 // knew about to reconstruct a low-bits mask value. 6590 unsigned LZ = A.countLeadingZeros(); 6591 unsigned TZ = A.countTrailingZeros(); 6592 unsigned BitWidth = A.getBitWidth(); 6593 KnownBits Known(BitWidth); 6594 computeKnownBits(BO->LHS, Known, getDataLayout(), 6595 0, &AC, nullptr, &DT); 6596 6597 APInt EffectiveMask = 6598 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 6599 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 6600 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 6601 const SCEV *LHS = getSCEV(BO->LHS); 6602 const SCEV *ShiftedLHS = nullptr; 6603 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 6604 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 6605 // For an expression like (x * 8) & 8, simplify the multiply. 
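            // Worked through for i32 (illustrative): for (x * 8) & 8, A = 8
            // gives TZ = MulZeros = 3, so GCD = 3, DivAmt = 1, and ShiftedLHS
            // folds back to x; the expression built below is then
            // (zext i1 (trunc x)) * 8, i.e. (x & 1) * 8.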
6606 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 6607 unsigned GCD = std::min(MulZeros, TZ); 6608 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 6609 SmallVector<const SCEV*, 4> MulOps; 6610 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 6611 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 6612 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 6613 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 6614 } 6615 } 6616 if (!ShiftedLHS) 6617 ShiftedLHS = getUDivExpr(LHS, MulCount); 6618 return getMulExpr( 6619 getZeroExtendExpr( 6620 getTruncateExpr(ShiftedLHS, 6621 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 6622 BO->LHS->getType()), 6623 MulCount); 6624 } 6625 } 6626 break; 6627 6628 case Instruction::Or: 6629 // If the RHS of the Or is a constant, we may have something like: 6630 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 6631 // optimizations will transparently handle this case. 6632 // 6633 // In order for this transformation to be safe, the LHS must be of the 6634 // form X*(2^n) and the Or constant must be less than 2^n. 6635 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6636 const SCEV *LHS = getSCEV(BO->LHS); 6637 const APInt &CIVal = CI->getValue(); 6638 if (GetMinTrailingZeros(LHS) >= 6639 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 6640 // Build a plain add SCEV. 6641 return getAddExpr(LHS, getSCEV(CI), 6642 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 6643 } 6644 } 6645 break; 6646 6647 case Instruction::Xor: 6648 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 6649 // If the RHS of xor is -1, then this is a not operation. 6650 if (CI->isMinusOne()) 6651 return getNotSCEV(getSCEV(BO->LHS)); 6652 6653 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 6654 // This is a variant of the check for xor with -1, and it handles 6655 // the case where instcombine has trimmed non-demanded bits out 6656 // of an xor with -1. 6657 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 6658 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 6659 if (LBO->getOpcode() == Instruction::And && 6660 LCI->getValue() == CI->getValue()) 6661 if (const SCEVZeroExtendExpr *Z = 6662 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 6663 Type *UTy = BO->LHS->getType(); 6664 const SCEV *Z0 = Z->getOperand(); 6665 Type *Z0Ty = Z0->getType(); 6666 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 6667 6668 // If C is a low-bits mask, the zero extend is serving to 6669 // mask off the high bits. Complement the operand and 6670 // re-apply the zext. 6671 if (CI->getValue().isMask(Z0TySize)) 6672 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 6673 6674 // If C is a single bit, it may be in the sign-bit position 6675 // before the zero-extend. In this case, represent the xor 6676 // using an add, which is equivalent, and re-apply the zext. 6677 APInt Trunc = CI->getValue().trunc(Z0TySize); 6678 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() && 6679 Trunc.isSignMask()) 6680 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 6681 UTy); 6682 } 6683 } 6684 break; 6685 6686 case Instruction::Shl: 6687 // Turn shift left of a constant amount into a multiply. 6688 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { 6689 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); 6690 6691 // If the shift count is not less than the bitwidth, the result of 6692 // the shift is undefined. 
Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // We can safely preserve the nuw flag in all cases. It's also safe to
      // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
      // requires special handling. It can be preserved as long as we're not
      // left shifting by bitwidth - 1.
      auto Flags = SCEV::FlagAnyWrap;
      if (BO->Op) {
        auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
        if ((MulFlags & SCEV::FlagNSW) &&
            ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
          Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
        if (MulFlags & SCEV::FlagNUW)
          Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
      }

      Constant *X = ConstantInt::get(
          getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
    }
    break;

  case Instruction::AShr: {
    // AShr X, C, where C is a constant.
    ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
    if (!CI)
      break;

    Type *OuterTy = BO->LHS->getType();
    uint64_t BitWidth = getTypeSizeInBits(OuterTy);
    // If the shift count is not less than the bitwidth, the result of
    // the shift is undefined. Don't try to analyze it, because the
    // resolution chosen here may differ from the resolution chosen in
    // other parts of the compiler.
    if (CI->getValue().uge(BitWidth))
      break;

    if (CI->isZero())
      return getSCEV(BO->LHS); // shift by zero --> noop

    uint64_t AShrAmt = CI->getZExtValue();
    Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

    Operator *L = dyn_cast<Operator>(BO->LHS);
    if (L && L->getOpcode() == Instruction::Shl) {
      // X = Shl A, n
      // Y = AShr X, m
      // Both n and m are constant.

      const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
      if (L->getOperand(1) == BO->RHS)
        // For a two-shift sext-inreg, i.e. n = m,
        // use sext(trunc(x)) as the SCEV expression.
        return getSignExtendExpr(
            getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

      ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
      if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
        uint64_t ShlAmt = ShlAmtCI->getZExtValue();
        if (ShlAmt > AShrAmt) {
          // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
          // expression. We already checked that ShlAmt < BitWidth, so
          // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
          // ShlAmt - AShrAmt < BitWidth - AShrAmt.
          APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                          ShlAmt - AShrAmt);
          return getSignExtendExpr(
              getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                         getConstant(Mul)), OuterTy);
        }
      }
    }
    break;
  }
  }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands, we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
6785 // 6786 // NOTE: This is effectively duplicating this logic from getSignExtend: 6787 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 6788 // but by that point the NSW information has potentially been lost. 6789 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 6790 Type *Ty = U->getType(); 6791 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 6792 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 6793 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 6794 } 6795 } 6796 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 6797 6798 case Instruction::BitCast: 6799 // BitCasts are no-op casts so we just eliminate the cast. 6800 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 6801 return getSCEV(U->getOperand(0)); 6802 break; 6803 6804 case Instruction::PtrToInt: { 6805 // Pointer to integer cast is straight-forward, so do model it. 6806 const SCEV *Op = getSCEV(U->getOperand(0)); 6807 Type *DstIntTy = U->getType(); 6808 // But only if effective SCEV (integer) type is wide enough to represent 6809 // all possible pointer values. 6810 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); 6811 if (isa<SCEVCouldNotCompute>(IntOp)) 6812 return getUnknown(V); 6813 return IntOp; 6814 } 6815 case Instruction::IntToPtr: 6816 // Just don't deal with inttoptr casts. 6817 return getUnknown(V); 6818 6819 case Instruction::SDiv: 6820 // If both operands are non-negative, this is just an udiv. 6821 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6822 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6823 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6824 break; 6825 6826 case Instruction::SRem: 6827 // If both operands are non-negative, this is just an urem. 6828 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 6829 isKnownNonNegative(getSCEV(U->getOperand(1)))) 6830 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 6831 break; 6832 6833 case Instruction::GetElementPtr: 6834 return createNodeForGEP(cast<GEPOperator>(U)); 6835 6836 case Instruction::PHI: 6837 return createNodeForPHI(cast<PHINode>(U)); 6838 6839 case Instruction::Select: 6840 // U can also be a select constant expr, which let fall through. Since 6841 // createNodeForSelect only works for a condition that is an `ICmpInst`, and 6842 // constant expressions cannot have instructions as operands, we'd have 6843 // returned getUnknown for a select constant expressions anyway. 
6844 if (isa<Instruction>(U)) 6845 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0), 6846 U->getOperand(1), U->getOperand(2)); 6847 break; 6848 6849 case Instruction::Call: 6850 case Instruction::Invoke: 6851 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 6852 return getSCEV(RV); 6853 6854 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 6855 switch (II->getIntrinsicID()) { 6856 case Intrinsic::abs: 6857 return getAbsExpr( 6858 getSCEV(II->getArgOperand(0)), 6859 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); 6860 case Intrinsic::umax: 6861 return getUMaxExpr(getSCEV(II->getArgOperand(0)), 6862 getSCEV(II->getArgOperand(1))); 6863 case Intrinsic::umin: 6864 return getUMinExpr(getSCEV(II->getArgOperand(0)), 6865 getSCEV(II->getArgOperand(1))); 6866 case Intrinsic::smax: 6867 return getSMaxExpr(getSCEV(II->getArgOperand(0)), 6868 getSCEV(II->getArgOperand(1))); 6869 case Intrinsic::smin: 6870 return getSMinExpr(getSCEV(II->getArgOperand(0)), 6871 getSCEV(II->getArgOperand(1))); 6872 case Intrinsic::usub_sat: { 6873 const SCEV *X = getSCEV(II->getArgOperand(0)); 6874 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6875 const SCEV *ClampedY = getUMinExpr(X, Y); 6876 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); 6877 } 6878 case Intrinsic::uadd_sat: { 6879 const SCEV *X = getSCEV(II->getArgOperand(0)); 6880 const SCEV *Y = getSCEV(II->getArgOperand(1)); 6881 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); 6882 return getAddExpr(ClampedX, Y, SCEV::FlagNUW); 6883 } 6884 case Intrinsic::start_loop_iterations: 6885 // A start_loop_iterations is just equivalent to the first operand for 6886 // SCEV purposes. 6887 return getSCEV(II->getArgOperand(0)); 6888 default: 6889 break; 6890 } 6891 } 6892 break; 6893 } 6894 6895 return getUnknown(V); 6896 } 6897 6898 //===----------------------------------------------------------------------===// 6899 // Iteration Count Computation Code 6900 // 6901 6902 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 6903 if (!ExitCount) 6904 return 0; 6905 6906 ConstantInt *ExitConst = ExitCount->getValue(); 6907 6908 // Guard against huge trip counts. 6909 if (ExitConst->getValue().getActiveBits() > 32) 6910 return 0; 6911 6912 // In case of integer overflow, this returns 0, which is correct. 6913 return ((unsigned)ExitConst->getZExtValue()) + 1; 6914 } 6915 6916 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 6917 if (BasicBlock *ExitingBB = L->getExitingBlock()) 6918 return getSmallConstantTripCount(L, ExitingBB); 6919 6920 // No trip count information for multiple exits. 
  return 0;
}

unsigned
ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                           const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              const BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest
    // power-of-two divisor. If overflow happens, the trip count expression
    // is still divisible by the greatest power-of-two divisor returned.
    return 1U << std::min((uint32_t)31,
                          GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
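  // For example (illustrative): an i32 backedge-taken count of -1 gives
  // TCExpr == 0 after the +1 wraps, so getActiveBits() == 0 below and we
  // conservatively return 1.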
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          const BasicBlock *ExitingBlock,
                                          ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
                                                   ExitCountKind Kind) {
  switch (Kind) {
  case Exact:
    return getBackedgeTakenInfo(L).getExact(L, this);
  case ConstantMaximum:
    return getBackedgeTakenInfo(L).getConstantMax(this);
  case SymbolicMaximum:
    return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
  }
  llvm_unreachable("Invalid ExitCountKind!");
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
}

/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}

ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting
  // it into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a product build, the statistic counters are not used.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getConstantMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //   loop0:
      //     %pn0 = phi
      //     ...
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0's and loop1's backedge-taken counts use the SCEV
      // expression for %v. If we don't have the early stop below, then in
      // cases like the above, getBackedgeTakenInfo(loop1) will clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) will clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
7156 return BackedgeTakenCounts.find(L)->second = std::move(Result); 7157 } 7158 7159 void ScalarEvolution::forgetAllLoops() { 7160 // This method is intended to forget all info about loops. It should 7161 // invalidate caches as if the following happened: 7162 // - The trip counts of all loops have changed arbitrarily 7163 // - Every llvm::Value has been updated in place to produce a different 7164 // result. 7165 BackedgeTakenCounts.clear(); 7166 PredicatedBackedgeTakenCounts.clear(); 7167 LoopPropertiesCache.clear(); 7168 ConstantEvolutionLoopExitValue.clear(); 7169 ValueExprMap.clear(); 7170 ValuesAtScopes.clear(); 7171 LoopDispositions.clear(); 7172 BlockDispositions.clear(); 7173 UnsignedRanges.clear(); 7174 SignedRanges.clear(); 7175 ExprValueMap.clear(); 7176 HasRecMap.clear(); 7177 MinTrailingZerosCache.clear(); 7178 PredicatedSCEVRewrites.clear(); 7179 } 7180 7181 void ScalarEvolution::forgetLoop(const Loop *L) { 7182 // Drop any stored trip count value. 7183 auto RemoveLoopFromBackedgeMap = 7184 [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) { 7185 auto BTCPos = Map.find(L); 7186 if (BTCPos != Map.end()) { 7187 BTCPos->second.clear(); 7188 Map.erase(BTCPos); 7189 } 7190 }; 7191 7192 SmallVector<const Loop *, 16> LoopWorklist(1, L); 7193 SmallVector<Instruction *, 32> Worklist; 7194 SmallPtrSet<Instruction *, 16> Visited; 7195 7196 // Iterate over all the loops and sub-loops to drop SCEV information. 7197 while (!LoopWorklist.empty()) { 7198 auto *CurrL = LoopWorklist.pop_back_val(); 7199 7200 RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL); 7201 RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL); 7202 7203 // Drop information about predicated SCEV rewrites for this loop. 7204 for (auto I = PredicatedSCEVRewrites.begin(); 7205 I != PredicatedSCEVRewrites.end();) { 7206 std::pair<const SCEV *, const Loop *> Entry = I->first; 7207 if (Entry.second == CurrL) 7208 PredicatedSCEVRewrites.erase(I++); 7209 else 7210 ++I; 7211 } 7212 7213 auto LoopUsersItr = LoopUsers.find(CurrL); 7214 if (LoopUsersItr != LoopUsers.end()) { 7215 for (auto *S : LoopUsersItr->second) 7216 forgetMemoizedResults(S); 7217 LoopUsers.erase(LoopUsersItr); 7218 } 7219 7220 // Drop information about expressions based on loop-header PHIs. 7221 PushLoopPHIs(CurrL, Worklist); 7222 7223 while (!Worklist.empty()) { 7224 Instruction *I = Worklist.pop_back_val(); 7225 if (!Visited.insert(I).second) 7226 continue; 7227 7228 ValueExprMapType::iterator It = 7229 ValueExprMap.find_as(static_cast<Value *>(I)); 7230 if (It != ValueExprMap.end()) { 7231 eraseValueFromMap(It->first); 7232 forgetMemoizedResults(It->second); 7233 if (PHINode *PN = dyn_cast<PHINode>(I)) 7234 ConstantEvolutionLoopExitValue.erase(PN); 7235 } 7236 7237 PushDefUseChildren(I, Worklist); 7238 } 7239 7240 LoopPropertiesCache.erase(CurrL); 7241 // Forget all contained loops too, to avoid dangling entries in the 7242 // ValuesAtScopes map. 7243 LoopWorklist.append(CurrL->begin(), CurrL->end()); 7244 } 7245 } 7246 7247 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 7248 while (Loop *Parent = L->getParentLoop()) 7249 L = Parent; 7250 forgetLoop(L); 7251 } 7252 7253 void ScalarEvolution::forgetValue(Value *V) { 7254 Instruction *I = dyn_cast<Instruction>(V); 7255 if (!I) return; 7256 7257 // Drop information about expressions based on loop-header PHIs. 
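  // For example (illustrative): forgetting %iv here also forgets the cached
  // SCEVs of its transitive users, such as %iv.next = add nsw i32 %iv, 1,
  // since those expressions were built in terms of %iv.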
7258 SmallVector<Instruction *, 16> Worklist; 7259 Worklist.push_back(I); 7260 7261 SmallPtrSet<Instruction *, 8> Visited; 7262 while (!Worklist.empty()) { 7263 I = Worklist.pop_back_val(); 7264 if (!Visited.insert(I).second) 7265 continue; 7266 7267 ValueExprMapType::iterator It = 7268 ValueExprMap.find_as(static_cast<Value *>(I)); 7269 if (It != ValueExprMap.end()) { 7270 eraseValueFromMap(It->first); 7271 forgetMemoizedResults(It->second); 7272 if (PHINode *PN = dyn_cast<PHINode>(I)) 7273 ConstantEvolutionLoopExitValue.erase(PN); 7274 } 7275 7276 PushDefUseChildren(I, Worklist); 7277 } 7278 } 7279 7280 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 7281 LoopDispositions.clear(); 7282 } 7283 7284 /// Get the exact loop backedge taken count considering all loop exits. A 7285 /// computable result can only be returned for loops with all exiting blocks 7286 /// dominating the latch. howFarToZero assumes that the limit of each loop test 7287 /// is never skipped. This is a valid assumption as long as the loop exits via 7288 /// that test. For precise results, it is the caller's responsibility to specify 7289 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 7290 const SCEV * 7291 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 7292 SCEVUnionPredicate *Preds) const { 7293 // If any exits were not computable, the loop is not computable. 7294 if (!isComplete() || ExitNotTaken.empty()) 7295 return SE->getCouldNotCompute(); 7296 7297 const BasicBlock *Latch = L->getLoopLatch(); 7298 // All exiting blocks we have collected must dominate the only backedge. 7299 if (!Latch) 7300 return SE->getCouldNotCompute(); 7301 7302 // All exiting blocks we have gathered dominate loop's latch, so exact trip 7303 // count is simply a minimum out of all these calculated exit counts. 7304 SmallVector<const SCEV *, 2> Ops; 7305 for (auto &ENT : ExitNotTaken) { 7306 const SCEV *BECount = ENT.ExactNotTaken; 7307 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 7308 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 7309 "We should only have known counts for exiting blocks that dominate " 7310 "latch!"); 7311 7312 Ops.push_back(BECount); 7313 7314 if (Preds && !ENT.hasAlwaysTruePredicate()) 7315 Preds->add(ENT.Predicate.get()); 7316 7317 assert((Preds || ENT.hasAlwaysTruePredicate()) && 7318 "Predicate should be always true!"); 7319 } 7320 7321 return SE->getUMinFromMismatchedTypes(Ops); 7322 } 7323 7324 /// Get the exact not taken count for this loop exit. 7325 const SCEV * 7326 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 7327 ScalarEvolution *SE) const { 7328 for (auto &ENT : ExitNotTaken) 7329 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7330 return ENT.ExactNotTaken; 7331 7332 return SE->getCouldNotCompute(); 7333 } 7334 7335 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 7336 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 7337 for (auto &ENT : ExitNotTaken) 7338 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 7339 return ENT.MaxNotTaken; 7340 7341 return SE->getCouldNotCompute(); 7342 } 7343 7344 /// getConstantMax - Get the constant max backedge taken count for the loop. 
7345 const SCEV * 7346 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 7347 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7348 return !ENT.hasAlwaysTruePredicate(); 7349 }; 7350 7351 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax()) 7352 return SE->getCouldNotCompute(); 7353 7354 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 7355 isa<SCEVConstant>(getConstantMax())) && 7356 "No point in having a non-constant max backedge taken count!"); 7357 return getConstantMax(); 7358 } 7359 7360 const SCEV * 7361 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 7362 ScalarEvolution *SE) { 7363 if (!SymbolicMax) 7364 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 7365 return SymbolicMax; 7366 } 7367 7368 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 7369 ScalarEvolution *SE) const { 7370 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 7371 return !ENT.hasAlwaysTruePredicate(); 7372 }; 7373 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 7374 } 7375 7376 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S, 7377 ScalarEvolution *SE) const { 7378 if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() && 7379 SE->hasOperand(getConstantMax(), S)) 7380 return true; 7381 7382 for (auto &ENT : ExitNotTaken) 7383 if (ENT.ExactNotTaken != SE->getCouldNotCompute() && 7384 SE->hasOperand(ENT.ExactNotTaken, S)) 7385 return true; 7386 7387 return false; 7388 } 7389 7390 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 7391 : ExactNotTaken(E), MaxNotTaken(E) { 7392 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7393 isa<SCEVConstant>(MaxNotTaken)) && 7394 "No point in having a non-constant max backedge taken count!"); 7395 } 7396 7397 ScalarEvolution::ExitLimit::ExitLimit( 7398 const SCEV *E, const SCEV *M, bool MaxOrZero, 7399 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 7400 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 7401 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 7402 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 7403 "Exact is not allowed to be less precise than Max"); 7404 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7405 isa<SCEVConstant>(MaxNotTaken)) && 7406 "No point in having a non-constant max backedge taken count!"); 7407 for (auto *PredSet : PredSetList) 7408 for (auto *P : *PredSet) 7409 addPredicate(P); 7410 } 7411 7412 ScalarEvolution::ExitLimit::ExitLimit( 7413 const SCEV *E, const SCEV *M, bool MaxOrZero, 7414 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 7415 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 7416 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7417 isa<SCEVConstant>(MaxNotTaken)) && 7418 "No point in having a non-constant max backedge taken count!"); 7419 } 7420 7421 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 7422 bool MaxOrZero) 7423 : ExitLimit(E, M, MaxOrZero, None) { 7424 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 7425 isa<SCEVConstant>(MaxNotTaken)) && 7426 "No point in having a non-constant max backedge taken count!"); 7427 } 7428 7429 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 7430 /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
    : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                  nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
          isa<SCEVConstant>(ConstantMax)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}

/// Compute the number of times the backedge of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];

    // We canonicalize untaken exits to br (constant) and ignore them so that
    // proving an exit untaken doesn't negatively impact our ability to reason
    // about the loop as a whole.
    if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
      if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
        bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
        if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
          continue;
      }

    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit; otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
7576 return getCouldNotCompute(); 7577 Exit = SBB; 7578 } 7579 assert(Exit && "Exiting block must have at least one exit"); 7580 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 7581 /*ControlsExit=*/IsOnlyExit); 7582 } 7583 7584 return getCouldNotCompute(); 7585 } 7586 7587 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 7588 const Loop *L, Value *ExitCond, bool ExitIfTrue, 7589 bool ControlsExit, bool AllowPredicates) { 7590 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 7591 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 7592 ControlsExit, AllowPredicates); 7593 } 7594 7595 Optional<ScalarEvolution::ExitLimit> 7596 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 7597 bool ExitIfTrue, bool ControlsExit, 7598 bool AllowPredicates) { 7599 (void)this->L; 7600 (void)this->ExitIfTrue; 7601 (void)this->AllowPredicates; 7602 7603 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7604 this->AllowPredicates == AllowPredicates && 7605 "Variance in assumed invariant key components!"); 7606 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 7607 if (Itr == TripCountMap.end()) 7608 return None; 7609 return Itr->second; 7610 } 7611 7612 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 7613 bool ExitIfTrue, 7614 bool ControlsExit, 7615 bool AllowPredicates, 7616 const ExitLimit &EL) { 7617 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 7618 this->AllowPredicates == AllowPredicates && 7619 "Variance in assumed invariant key components!"); 7620 7621 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 7622 assert(InsertResult.second && "Expected successful insertion!"); 7623 (void)InsertResult; 7624 (void)ExitIfTrue; 7625 } 7626 7627 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 7628 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7629 bool ControlsExit, bool AllowPredicates) { 7630 7631 if (auto MaybeEL = 7632 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7633 return *MaybeEL; 7634 7635 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 7636 ControlsExit, AllowPredicates); 7637 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 7638 return EL; 7639 } 7640 7641 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 7642 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 7643 bool ControlsExit, bool AllowPredicates) { 7644 // Handle BinOp conditions (And, Or). 7645 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 7646 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 7647 return *LimitFromBinOp; 7648 7649 // With an icmp, it may be feasible to compute an exact backedge-taken count. 7650 // Proceed to the next level to examine the icmp. 7651 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 7652 ExitLimit EL = 7653 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 7654 if (EL.hasFullInfo() || !AllowPredicates) 7655 return EL; 7656 7657 // Try again, but use SCEV predicates this time. 7658 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 7659 /*AllowPredicates=*/true); 7660 } 7661 7662 // Check for a constant condition. 
These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::computeExitLimitFromCondFromBinOp(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  Value *Op0, *Op1;
  bool IsAnd = false;
  if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
    IsAnd = true;
  else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
    IsAnd = false;
  else
    return None;

  // EitherMayExit is true in these two cases:
  //   br (and Op0 Op1), loop, exit
  //   br (or  Op0 Op1), exit, loop
  bool EitherMayExit = IsAnd ^ ExitIfTrue;
  ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);
  ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
                                                 ControlsExit && !EitherMayExit,
                                                 AllowPredicates);

  // Be robust against unsimplified IR for the form "op i1 X, NeutralElement"
  const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
  if (isa<ConstantInt>(Op1))
    return Op1 == NeutralElement ? EL0 : EL1;
  if (isa<ConstantInt>(Op0))
    return Op0 == NeutralElement ? EL1 : EL0;

  const SCEV *BECount = getCouldNotCompute();
  const SCEV *MaxBECount = getCouldNotCompute();
  if (EitherMayExit) {
    // Both conditions must keep the loop executing for it to continue, so the
    // loop runs until the first of the two exits is taken. Choose the less
    // conservative count.
    // If ExitCond is a short-circuit form (select), using
    // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
    // To see the detailed examples, please see
    // test/Analysis/ScalarEvolution/exit-count-select.ll
    bool PoisonSafe = isa<BinaryOperator>(ExitCond);
    if (!PoisonSafe)
      // Even if ExitCond is select, we can safely derive BECount using both
      // EL0 and EL1 in these cases:
      // (1) EL0.ExactNotTaken is non-zero
      // (2) EL1.ExactNotTaken is non-poison
      // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
      //     it cannot be umin(0, ..))
      // The PoisonSafe assignment below is simplified and the assertion after
      // BECount calculation fully guarantees the condition (3).
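      // For example, in the short-circuit form
      //   %cond = select i1 %c0, i1 %c1, i1 false
      // a poison %c1 does not make %cond poison once %c0 is false, so an
      // exit count computed from %c1 may rest on a poison value the loop
      // never actually evaluates, and folding it into umin would be unsound.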
      PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
                   isa<SCEVConstant>(EL1.ExactNotTaken);
    if (EL0.ExactNotTaken != getCouldNotCompute() &&
        EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
      BECount =
          getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);

      // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
      // it should have been simplified to zero (see the condition (3) above)
      assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
             BECount->isZero());
    }
    if (EL0.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL1.MaxNotTaken;
    else if (EL1.MaxNotTaken == getCouldNotCompute())
      MaxBECount = EL0.MaxNotTaken;
    else
      MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
  } else {
    // The loop exits only when both conditions take their exit value at the
    // same time. For now, be conservative.
    if (EL0.ExactNotTaken == EL1.ExactNotTaken)
      BECount = EL0.ExactNotTaken;
  }

  // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
  // to be more aggressive when computing BECount than when computing
  // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
  // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
  // to not.
  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, false,
                   { &EL0.Predicates, &EL1.Predicates });
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
        computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
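        // For example, for the exit test "%iv u< 100" the exact region is
        // the unsigned range [0, 100): precisely the values of the addrec
        // for which the comparison holds.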
7815 ConstantRange CompRange = 7816 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 7817 7818 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 7819 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 7820 } 7821 7822 switch (Pred) { 7823 case ICmpInst::ICMP_NE: { // while (X != Y) 7824 // Convert to: while (X-Y != 0) 7825 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 7826 AllowPredicates); 7827 if (EL.hasAnyInfo()) return EL; 7828 break; 7829 } 7830 case ICmpInst::ICMP_EQ: { // while (X == Y) 7831 // Convert to: while (X-Y == 0) 7832 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 7833 if (EL.hasAnyInfo()) return EL; 7834 break; 7835 } 7836 case ICmpInst::ICMP_SLT: 7837 case ICmpInst::ICMP_ULT: { // while (X < Y) 7838 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 7839 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 7840 AllowPredicates); 7841 if (EL.hasAnyInfo()) return EL; 7842 break; 7843 } 7844 case ICmpInst::ICMP_SGT: 7845 case ICmpInst::ICMP_UGT: { // while (X > Y) 7846 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 7847 ExitLimit EL = 7848 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 7849 AllowPredicates); 7850 if (EL.hasAnyInfo()) return EL; 7851 break; 7852 } 7853 default: 7854 break; 7855 } 7856 7857 auto *ExhaustiveCount = 7858 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 7859 7860 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 7861 return ExhaustiveCount; 7862 7863 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 7864 ExitCond->getOperand(1), L, OriginalPred); 7865 } 7866 7867 ScalarEvolution::ExitLimit 7868 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 7869 SwitchInst *Switch, 7870 BasicBlock *ExitingBlock, 7871 bool ControlsExit) { 7872 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 7873 7874 // Give up if the exit is the default dest of a switch. 7875 if (Switch->getDefaultDest() == ExitingBlock) 7876 return getCouldNotCompute(); 7877 7878 assert(L->contains(Switch->getDefaultDest()) && 7879 "Default case must not exit the loop!"); 7880 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 7881 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 7882 7883 // while (X != Y) --> while (X-Y != 0) 7884 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 7885 if (EL.hasAnyInfo()) 7886 return EL; 7887 7888 return getCouldNotCompute(); 7889 } 7890 7891 static ConstantInt * 7892 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 7893 ScalarEvolution &SE) { 7894 const SCEV *InVal = SE.getConstant(C); 7895 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 7896 assert(isa<SCEVConstant>(Val) && 7897 "Evaluation of SCEV at constant didn't fold correctly?"); 7898 return cast<SCEVConstant>(Val)->getValue(); 7899 } 7900 7901 /// Given an exit condition of 'icmp op load X, cst', try to see if we can 7902 /// compute the backedge execution count. 7903 ScalarEvolution::ExitLimit 7904 ScalarEvolution::computeLoadConstantCompareExitLimit( 7905 LoadInst *LI, 7906 Constant *RHS, 7907 const Loop *L, 7908 ICmpInst::Predicate predicate) { 7909 if (LI->isVolatile()) return getCouldNotCompute(); 7910 7911 // Check to see if the loaded pointer is a getelementptr of a global. 7912 // TODO: Use SCEV instead of manually grubbing with GEPs. 
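  // The shape being matched is a load of the form
  //   %elt = load (gep @GV, 0, ..., %iv, ...)
  // where @GV is a constant global with a definitive initializer and %iv is
  // an affine addrec; the loop below folds the load through the initializer
  // for successive iteration numbers until the compare settles or the
  // brute-force limit is hit.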
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}<L>.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
      isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
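  // For example, "%s = lshr i32 %x, 3" matches with OutLHS = %x and
  // OutOpCode = Instruction::LShr; a shift by zero deliberately does not
  // match, since such a recurrence never makes progress.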
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence", either %iv or %iv.shifted, in
  //
  // loop:
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to 0 if K is non-negative and
    // to -1 if K is negative, in at most bitwidth(K) iterations.
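    // For example, {-8,ashr,1} produces -8, -4, -2, -1, -1, ... and
    // {5,ashr,1} produces 5, 2, 1, 0, 0, ...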
8087 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 8088 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, 8089 Predecessor->getTerminator(), &DT); 8090 auto *Ty = cast<IntegerType>(RHS->getType()); 8091 if (Known.isNonNegative()) 8092 StableValue = ConstantInt::get(Ty, 0); 8093 else if (Known.isNegative()) 8094 StableValue = ConstantInt::get(Ty, -1, true); 8095 else 8096 return getCouldNotCompute(); 8097 8098 break; 8099 } 8100 case Instruction::LShr: 8101 case Instruction::Shl: 8102 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 8103 // stabilize to 0 in at most bitwidth(K) iterations. 8104 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 8105 break; 8106 } 8107 8108 auto *Result = 8109 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 8110 assert(Result->getType()->isIntegerTy(1) && 8111 "Otherwise cannot be an operand to a branch instruction"); 8112 8113 if (Result->isZeroValue()) { 8114 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8115 const SCEV *UpperBound = 8116 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 8117 return ExitLimit(getCouldNotCompute(), UpperBound, false); 8118 } 8119 8120 return getCouldNotCompute(); 8121 } 8122 8123 /// Return true if we can constant fold an instruction of the specified type, 8124 /// assuming that all operands were constants. 8125 static bool CanConstantFold(const Instruction *I) { 8126 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 8127 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 8128 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 8129 return true; 8130 8131 if (const CallInst *CI = dyn_cast<CallInst>(I)) 8132 if (const Function *F = CI->getCalledFunction()) 8133 return canConstantFoldCallTo(CI, F); 8134 return false; 8135 } 8136 8137 /// Determine whether this instruction can constant evolve within this loop 8138 /// assuming its operands can all constant evolve. 8139 static bool canConstantEvolve(Instruction *I, const Loop *L) { 8140 // An instruction outside of the loop can't be derived from a loop PHI. 8141 if (!L->contains(I)) return false; 8142 8143 if (isa<PHINode>(I)) { 8144 // We don't currently keep track of the control flow needed to evaluate 8145 // PHIs, so we cannot handle PHIs inside of loops. 8146 return L->getHeader() == I->getParent(); 8147 } 8148 8149 // If we won't be able to constant fold this expression even if the operands 8150 // are constants, bail early. 8151 return CanConstantFold(I); 8152 } 8153 8154 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 8155 /// recursing through each instruction operand until reaching a loop header phi. 8156 static PHINode * 8157 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 8158 DenseMap<Instruction *, PHINode *> &PHIMap, 8159 unsigned Depth) { 8160 if (Depth > MaxConstantEvolvingDepth) 8161 return nullptr; 8162 8163 // Otherwise, we can evaluate this instruction if all of its operands are 8164 // constant or derived from a PHI node themselves. 8165 PHINode *PHI = nullptr; 8166 for (Value *Op : UseInst->operands()) { 8167 if (isa<Constant>(Op)) continue; 8168 8169 Instruction *OpInst = dyn_cast<Instruction>(Op); 8170 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 8171 8172 PHINode *P = dyn_cast<PHINode>(OpInst); 8173 if (!P) 8174 // If this operand is already visited, reuse the prior result. 8175 // We may have P != PHI if this is the deepest point at which the 8176 // inconsistent paths meet. 
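      // (Distinct operands of UseInst can reach the same header phi through
      // different instruction chains; memoizing in PHIMap keeps the walk
      // linear in the number of instructions visited.)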
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
8263 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) { 8264 Constant *IncomingVal = nullptr; 8265 8266 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 8267 if (PN->getIncomingBlock(i) == BB) 8268 continue; 8269 8270 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i)); 8271 if (!CurrentVal) 8272 return nullptr; 8273 8274 if (IncomingVal != CurrentVal) { 8275 if (IncomingVal) 8276 return nullptr; 8277 IncomingVal = CurrentVal; 8278 } 8279 } 8280 8281 return IncomingVal; 8282 } 8283 8284 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 8285 /// in the header of its containing loop, we know the loop executes a 8286 /// constant number of times, and the PHI node is just a recurrence 8287 /// involving constants, fold it. 8288 Constant * 8289 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 8290 const APInt &BEs, 8291 const Loop *L) { 8292 auto I = ConstantEvolutionLoopExitValue.find(PN); 8293 if (I != ConstantEvolutionLoopExitValue.end()) 8294 return I->second; 8295 8296 if (BEs.ugt(MaxBruteForceIterations)) 8297 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it. 8298 8299 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 8300 8301 DenseMap<Instruction *, Constant *> CurrentIterVals; 8302 BasicBlock *Header = L->getHeader(); 8303 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!"); 8304 8305 BasicBlock *Latch = L->getLoopLatch(); 8306 if (!Latch) 8307 return nullptr; 8308 8309 for (PHINode &PHI : Header->phis()) { 8310 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch)) 8311 CurrentIterVals[&PHI] = StartCST; 8312 } 8313 if (!CurrentIterVals.count(PN)) 8314 return RetVal = nullptr; 8315 8316 Value *BEValue = PN->getIncomingValueForBlock(Latch); 8317 8318 // Execute the loop symbolically to determine the exit value. 8319 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) && 8320 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!"); 8321 8322 unsigned NumIterations = BEs.getZExtValue(); // must be in range 8323 unsigned IterationNum = 0; 8324 const DataLayout &DL = getDataLayout(); 8325 for (; ; ++IterationNum) { 8326 if (IterationNum == NumIterations) 8327 return RetVal = CurrentIterVals[PN]; // Got exit value! 8328 8329 // Compute the value of the PHIs for the next iteration. 8330 // EvaluateExpression adds non-phi values to the CurrentIterVals map. 8331 DenseMap<Instruction *, Constant *> NextIterVals; 8332 Constant *NextPHI = 8333 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); 8334 if (!NextPHI) 8335 return nullptr; // Couldn't evaluate! 8336 NextIterVals[PN] = NextPHI; 8337 8338 bool StoppedEvolving = NextPHI == CurrentIterVals[PN]; 8339 8340 // Also evaluate the other PHI nodes. However, we don't get to stop if we 8341 // cease to be able to evaluate one of them or if they stop evolving, 8342 // because that doesn't necessarily prevent us from computing PN. 8343 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; 8344 for (const auto &I : CurrentIterVals) { 8345 PHINode *PHI = dyn_cast<PHINode>(I.first); 8346 if (!PHI || PHI == PN || PHI->getParent() != Header) continue; 8347 PHIsToCompute.emplace_back(PHI, I.second); 8348 } 8349 // We use two distinct loops because EvaluateExpression may invalidate any 8350 // iterators into CurrentIterVals. 
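    // (EvaluateExpression stores the values it computes for non-phi
    // instructions back into CurrentIterVals, and a DenseMap insertion can
    // rehash and invalidate outstanding iterators.)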
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we have found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
8435 return getCouldNotCompute(); 8436 } 8437 8438 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 8439 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = 8440 ValuesAtScopes[V]; 8441 // Check to see if we've folded this expression at this loop before. 8442 for (auto &LS : Values) 8443 if (LS.first == L) 8444 return LS.second ? LS.second : V; 8445 8446 Values.emplace_back(L, nullptr); 8447 8448 // Otherwise compute it. 8449 const SCEV *C = computeSCEVAtScope(V, L); 8450 for (auto &LS : reverse(ValuesAtScopes[V])) 8451 if (LS.first == L) { 8452 LS.second = C; 8453 break; 8454 } 8455 return C; 8456 } 8457 8458 /// This builds up a Constant using the ConstantExpr interface. That way, we 8459 /// will return Constants for objects which aren't represented by a 8460 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt. 8461 /// Returns NULL if the SCEV isn't representable as a Constant. 8462 static Constant *BuildConstantFromSCEV(const SCEV *V) { 8463 switch (V->getSCEVType()) { 8464 case scCouldNotCompute: 8465 case scAddRecExpr: 8466 return nullptr; 8467 case scConstant: 8468 return cast<SCEVConstant>(V)->getValue(); 8469 case scUnknown: 8470 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); 8471 case scSignExtend: { 8472 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); 8473 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) 8474 return ConstantExpr::getSExt(CastOp, SS->getType()); 8475 return nullptr; 8476 } 8477 case scZeroExtend: { 8478 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); 8479 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) 8480 return ConstantExpr::getZExt(CastOp, SZ->getType()); 8481 return nullptr; 8482 } 8483 case scPtrToInt: { 8484 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V); 8485 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand())) 8486 return ConstantExpr::getPtrToInt(CastOp, P2I->getType()); 8487 8488 return nullptr; 8489 } 8490 case scTruncate: { 8491 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); 8492 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) 8493 return ConstantExpr::getTrunc(CastOp, ST->getType()); 8494 return nullptr; 8495 } 8496 case scAddExpr: { 8497 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); 8498 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { 8499 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { 8500 unsigned AS = PTy->getAddressSpace(); 8501 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8502 C = ConstantExpr::getBitCast(C, DestPtrTy); 8503 } 8504 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) { 8505 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i)); 8506 if (!C2) 8507 return nullptr; 8508 8509 // First pointer! 8510 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) { 8511 unsigned AS = C2->getType()->getPointerAddressSpace(); 8512 std::swap(C, C2); 8513 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); 8514 // The offsets have been converted to bytes. We can add bytes to an 8515 // i8* by GEP with the byte count in the first index. 8516 C = ConstantExpr::getBitCast(C, DestPtrTy); 8517 } 8518 8519 // Don't bother trying to sum two pointers. We probably can't 8520 // statically compute a load that results from it anyway. 
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          if (PTy->getElementType()->isStructTy())
            C2 = ConstantExpr::getIntegerCast(
                C2, Type::getInt32Ty(C->getContext()), true);
          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
        } else
          C = ConstantExpr::getAdd(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy())
        return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy())
          return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    return nullptr;
  }
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
    return nullptr; // TODO: smax, umax, smin, umin.
  }
  llvm_unreachable("Unknown SCEV kind!");
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node. Check
          // to see if the loop that contains it has a known backedge-taken
          // count. If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ?
                    0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes. If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use the new
        // value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
          // for the simplest case just support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and not SCEVable (i.e.
          // neither integer nor pointer), don't even try to analyze it with
          // SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
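        // For example, if the operand {0,+,1}<L1> folds to the constant 9 at
        // this scope (L1's backedge-taken count being 9), the rebuilt add can
        // then fold further with any constant operands.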
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded addrec.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
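      // For an affine addrec this is just Start + Step * BackedgeTakenCount;
      // e.g. {0,+,3}<L> with a backedge-taken count of 4 evaluates to 12.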
8770 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 8771 } 8772 8773 return AddRec; 8774 } 8775 8776 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 8777 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8778 if (Op == Cast->getOperand()) 8779 return Cast; // must be loop invariant 8780 return getZeroExtendExpr(Op, Cast->getType()); 8781 } 8782 8783 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 8784 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8785 if (Op == Cast->getOperand()) 8786 return Cast; // must be loop invariant 8787 return getSignExtendExpr(Op, Cast->getType()); 8788 } 8789 8790 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 8791 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8792 if (Op == Cast->getOperand()) 8793 return Cast; // must be loop invariant 8794 return getTruncateExpr(Op, Cast->getType()); 8795 } 8796 8797 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) { 8798 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 8799 if (Op == Cast->getOperand()) 8800 return Cast; // must be loop invariant 8801 return getPtrToIntExpr(Op, Cast->getType()); 8802 } 8803 8804 llvm_unreachable("Unknown SCEV type!"); 8805 } 8806 8807 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 8808 return getSCEVAtScope(getSCEV(V), L); 8809 } 8810 8811 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const { 8812 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) 8813 return stripInjectiveFunctions(ZExt->getOperand()); 8814 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) 8815 return stripInjectiveFunctions(SExt->getOperand()); 8816 return S; 8817 } 8818 8819 /// Finds the minimum unsigned root of the following equation: 8820 /// 8821 /// A * X = B (mod N) 8822 /// 8823 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 8824 /// A and B isn't important. 8825 /// 8826 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 8827 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, 8828 ScalarEvolution &SE) { 8829 uint32_t BW = A.getBitWidth(); 8830 assert(BW == SE.getTypeSizeInBits(B->getType())); 8831 assert(A != 0 && "A must be non-zero."); 8832 8833 // 1. D = gcd(A, N) 8834 // 8835 // The gcd of A and N may have only one prime factor: 2. The number of 8836 // trailing zeros in A is its multiplicity 8837 uint32_t Mult2 = A.countTrailingZeros(); 8838 // D = 2^Mult2 8839 8840 // 2. Check if B is divisible by D. 8841 // 8842 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 8843 // is not less than multiplicity of this prime factor for D. 8844 if (SE.GetMinTrailingZeros(B) < Mult2) 8845 return SE.getCouldNotCompute(); 8846 8847 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 8848 // modulo (N / D). 8849 // 8850 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent 8851 // (N / D) in general. The inverse itself always fits into BW bits, though, 8852 // so we immediately truncate it. 8853 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 8854 APInt Mod(BW + 1, 0); 8855 Mod.setBit(BW - Mult2); // Mod = N / D 8856 APInt I = AD.multiplicativeInverse(Mod).trunc(BW); 8857 8858 // 4. 
Compute the minimum unsigned root of the equation: 8859 // I * (B / D) mod (N / D) 8860 // To simplify the computation, we factor out the divide by D: 8861 // (I * B mod N) / D 8862 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2)); 8863 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D); 8864 } 8865 8866 /// For a given quadratic addrec, generate coefficients of the corresponding 8867 /// quadratic equation, multiplied by a common value to ensure that they are 8868 /// integers. 8869 /// The returned value is a tuple { A, B, C, M, BitWidth }, where 8870 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C 8871 /// were multiplied by, and BitWidth is the bit width of the original addrec 8872 /// coefficients. 8873 /// This function returns None if the addrec coefficients are not compile- 8874 /// time constants. 8875 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>> 8876 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) { 8877 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 8878 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 8879 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 8880 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 8881 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: " 8882 << *AddRec << '\n'); 8883 8884 // We currently can only solve this if the coefficients are constants. 8885 if (!LC || !MC || !NC) { 8886 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n"); 8887 return None; 8888 } 8889 8890 APInt L = LC->getAPInt(); 8891 APInt M = MC->getAPInt(); 8892 APInt N = NC->getAPInt(); 8893 assert(!N.isNullValue() && "This is not a quadratic addrec"); 8894 8895 unsigned BitWidth = LC->getAPInt().getBitWidth(); 8896 unsigned NewWidth = BitWidth + 1; 8897 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: " 8898 << BitWidth << '\n'); 8899 // The sign-extension (as opposed to a zero-extension) here matches the 8900 // extension used in SolveQuadraticEquationWrap (with the same motivation). 8901 N = N.sext(NewWidth); 8902 M = M.sext(NewWidth); 8903 L = L.sext(NewWidth); 8904 8905 // The increments are M, M+N, M+2N, ..., so the accumulated values are 8906 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is, 8907 // L+M, L+2M+N, L+3M+3N, ... 8908 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N. 8909 // 8910 // The equation Acc = 0 is then 8911 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0. 8912 // In a quadratic form it becomes: 8913 // N n^2 + (2M-N) n + 2L = 0. 8914 8915 APInt A = N; 8916 APInt B = 2 * M - A; 8917 APInt C = 2 * L; 8918 APInt T = APInt(NewWidth, 2); 8919 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B 8920 << "x + " << C << ", coeff bw: " << NewWidth 8921 << ", multiplied by " << T << '\n'); 8922 return std::make_tuple(A, B, C, T, BitWidth); 8923 } 8924 8925 /// Helper function to compare optional APInts: 8926 /// (a) if X and Y both exist, return min(X, Y), 8927 /// (b) if neither X nor Y exist, return None, 8928 /// (c) if exactly one of X and Y exists, return that value. 8929 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) { 8930 if (X.hasValue() && Y.hasValue()) { 8931 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth()); 8932 APInt XW = X->sextOrSelf(W); 8933 APInt YW = Y->sextOrSelf(W); 8934 return XW.slt(YW) ? 
*X : *Y; 8935 } 8936 if (!X.hasValue() && !Y.hasValue()) 8937 return None; 8938 return X.hasValue() ? *X : *Y; 8939 } 8940 8941 /// Helper function to truncate an optional APInt to a given BitWidth. 8942 /// When solving addrec-related equations, it is preferable to return a value 8943 /// that has the same bit width as the original addrec's coefficients. If the 8944 /// solution fits in the original bit width, truncate it (except for i1). 8945 /// Returning a value of a different bit width may inhibit some optimizations. 8946 /// 8947 /// In general, a solution to a quadratic equation generated from an addrec 8948 /// may require BW+1 bits, where BW is the bit width of the addrec's 8949 /// coefficients. The reason is that the coefficients of the quadratic 8950 /// equation are BW+1 bits wide (to avoid truncation when converting from 8951 /// the addrec to the equation). 8952 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) { 8953 if (!X.hasValue()) 8954 return None; 8955 unsigned W = X->getBitWidth(); 8956 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth)) 8957 return X->trunc(BitWidth); 8958 return X; 8959 } 8960 8961 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n 8962 /// iterations. The values L, M, N are assumed to be signed, and they 8963 /// should all have the same bit widths. 8964 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW, 8965 /// where BW is the bit width of the addrec's coefficients. 8966 /// If the calculated value is a BW-bit integer (for BW > 1), it will be 8967 /// returned as such, otherwise the bit width of the returned value may 8968 /// be greater than BW. 8969 /// 8970 /// This function returns None if 8971 /// (a) the addrec coefficients are not constant, or 8972 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases 8973 /// like x^2 = 5, no integer solutions exist, in other cases an integer 8974 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it. 8975 static Optional<APInt> 8976 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 8977 APInt A, B, C, M; 8978 unsigned BitWidth; 8979 auto T = GetQuadraticEquation(AddRec); 8980 if (!T.hasValue()) 8981 return None; 8982 8983 std::tie(A, B, C, M, BitWidth) = *T; 8984 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n"); 8985 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1); 8986 if (!X.hasValue()) 8987 return None; 8988 8989 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X); 8990 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE); 8991 if (!V->isZero()) 8992 return None; 8993 8994 return TruncIfPossible(X, BitWidth); 8995 } 8996 8997 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n 8998 /// iterations. The values M, N are assumed to be signed, and they 8999 /// should all have the same bit widths. 9000 /// Find the least n such that c(n) does not belong to the given range, 9001 /// while c(n-1) does. 9002 /// 9003 /// This function returns None if 9004 /// (a) the addrec coefficients are not constant, or 9005 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the 9006 /// bounds of the range. 
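///
/// Illustrative derivation (an editorial sketch following GetQuadraticEquation
/// above, not part of the original interface contract): with
/// c(n) = M*n + N*n*(n-1)/2, crossing a boundary value Bound means solving
/// c(n) = Bound. Multiplying through by 2 (the multiplier returned by
/// GetQuadraticEquation) yields the integer equation
///   N*n^2 + (2M-N)*n - 2*Bound = 0,
/// which is why SolveForBoundary below scales the bound by the multiplier and
/// passes its negation as the constant term to SolveQuadraticEquationWrap.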
9007 static Optional<APInt> 9008 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, 9009 const ConstantRange &Range, ScalarEvolution &SE) { 9010 assert(AddRec->getOperand(0)->isZero() && 9011 "Starting value of addrec should be 0"); 9012 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range " 9013 << Range << ", addrec " << *AddRec << '\n'); 9014 // This case is handled in getNumIterationsInRange. Here we can assume that 9015 // we start in the range. 9016 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) && 9017 "Addrec's initial value should be in range"); 9018 9019 APInt A, B, C, M; 9020 unsigned BitWidth; 9021 auto T = GetQuadraticEquation(AddRec); 9022 if (!T.hasValue()) 9023 return None; 9024 9025 // Be careful about the return value: there can be two reasons for not 9026 // returning an actual number. First, if no solutions to the equations 9027 // were found, and second, if the solutions don't leave the given range. 9028 // The first case means that the actual solution is "unknown", the second 9029 // means that it's known, but not valid. If the solution is unknown, we 9030 // cannot make any conclusions. 9031 // Return a pair: the optional solution and a flag indicating if the 9032 // solution was found. 9033 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> { 9034 // Solve for signed overflow and unsigned overflow, pick the lower 9035 // solution. 9036 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary " 9037 << Bound << " (before multiplying by " << M << ")\n"); 9038 Bound *= M; // The quadratic equation multiplier. 9039 9040 Optional<APInt> SO = None; 9041 if (BitWidth > 1) { 9042 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9043 "signed overflow\n"); 9044 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth); 9045 } 9046 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for " 9047 "unsigned overflow\n"); 9048 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, 9049 BitWidth+1); 9050 9051 auto LeavesRange = [&] (const APInt &X) { 9052 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X); 9053 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE); 9054 if (Range.contains(V0->getValue())) 9055 return false; 9056 // X should be at least 1, so X-1 is non-negative. 9057 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1); 9058 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE); 9059 if (Range.contains(V1->getValue())) 9060 return true; 9061 return false; 9062 }; 9063 9064 // If SolveQuadraticEquationWrap returns None, it means that there can 9065 // be a solution, but the function failed to find it. We cannot treat it 9066 // as "no solution". 9067 if (!SO.hasValue() || !UO.hasValue()) 9068 return { None, false }; 9069 9070 // Check the smaller value first to see if it leaves the range. 9071 // At this point, both SO and UO must have values. 9072 Optional<APInt> Min = MinOptional(SO, UO); 9073 if (LeavesRange(*Min)) 9074 return { Min, true }; 9075 Optional<APInt> Max = Min == SO ? UO : SO; 9076 if (LeavesRange(*Max)) 9077 return { Max, true }; 9078 9079 // Solutions were found, but were eliminated, hence the "true". 9080 return { None, true }; 9081 }; 9082 9083 std::tie(A, B, C, M, BitWidth) = *T; 9084 // Lower bound is inclusive, subtract 1 to represent the exiting value. 
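  // Worked example (illustrative): for Range = [0, 10) the exiting values of
  // interest are -1 (one below the inclusive lower bound) and 10 (the
  // exclusive upper bound itself), so the two boundaries solved for below are
  // Lower = 0 - 1 = -1 and Upper = 10.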
9085   APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9086   APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9087   auto SL = SolveForBoundary(Lower);
9088   auto SU = SolveForBoundary(Upper);
9089   // If any of the solutions was unknown, no meaningful conclusions can
9090   // be made.
9091   if (!SL.second || !SU.second)
9092     return None;
9093 
9094   // Claim: The correct solution is not some value between Min and Max.
9095   //
9096   // Justification: Assuming that Min and Max are different values, one of
9097   // them is when the first signed overflow happens, the other is when the
9098   // first unsigned overflow happens. Crossing the range boundary is only
9099   // possible via an overflow (treating 0 as a special case of it, modeling
9100   // an overflow as crossing k*2^W for some k).
9101   //
9102   // The interesting case here is when Min was eliminated as an invalid
9103   // solution, but Max was not. The argument is that if there was another
9104   // overflow between Min and Max, it would also have been eliminated if
9105   // it was considered.
9106   //
9107   // For a given boundary, it is possible to have two overflows of the same
9108   // type (signed/unsigned) without having the other type in between: this
9109   // can happen when the vertex of the parabola is between the iterations
9110   // corresponding to the overflows. This is only possible when the two
9111   // overflows cross k*2^W for the same k. In such case, if the second one
9112   // left the range (and was the first one to do so), the first overflow
9113   // would have to enter the range, which would mean that either we had left
9114   // the range before or that we started outside of it. Both of these cases
9115   // are contradictions.
9116   //
9117   // Claim: In the case where SolveForBoundary returns None, the correct
9118   // solution is not some value between the Max for this boundary and the
9119   // Min of the other boundary.
9120   //
9121   // Justification: Assume that we had such Max_A and Min_B corresponding
9122   // to range boundaries A and B and such that Max_A < Min_B. If there was
9123   // a solution between Max_A and Min_B, it would have to be caused by an
9124   // overflow corresponding to either A or B. It cannot correspond to B,
9125   // since Min_B is the first occurrence of such an overflow. If it
9126   // corresponded to A, it would have to be either a signed or an unsigned
9127   // overflow that is larger than both eliminated overflows for A. But
9128   // between the eliminated overflows and this overflow, the values would
9129   // cover the entire value space, thus crossing the other boundary, which
9130   // is a contradiction.
9131 
9132   return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9133 }
9134 
9135 ScalarEvolution::ExitLimit
9136 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9137                               bool AllowPredicates) {
9138 
9139   // This is only used for loops with an "x != y" exit test. The exit
9140   // condition is now expressed as a single expression, V = x-y. So the exit
9141   // test is effectively V != 0. We know, and take advantage of, the fact that
9142   // this expression is only used in a comparison-with-zero context.
9143 
9144   SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9145   // If the value is a constant.
9146   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9147     // If the value is already zero, the branch will execute zero times.
9148     if (C->getValue()->isZero()) return C;
9149     return getCouldNotCompute();  // Otherwise it will loop infinitely.
9150   }
9151 
9152   const SCEVAddRecExpr *AddRec =
9153       dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9154 
9155   if (!AddRec && AllowPredicates)
9156     // Try to make this an AddRec using runtime tests, in the first X
9157     // iterations of this loop, where X is the SCEV expression found by the
9158     // algorithm below.
9159     AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9160 
9161   if (!AddRec || AddRec->getLoop() != L)
9162     return getCouldNotCompute();
9163 
9164   // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9165   // the quadratic equation to solve it.
9166   if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9167     // We can only use this value if the chrec ends up with an exact zero
9168     // value at this index. When solving for "X*X != 5", for example, we
9169     // should not accept a root of 2.
9170     if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9171       const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9172       return ExitLimit(R, R, false, Predicates);
9173     }
9174     return getCouldNotCompute();
9175   }
9176 
9177   // Otherwise we can only handle this if it is affine.
9178   if (!AddRec->isAffine())
9179     return getCouldNotCompute();
9180 
9181   // If this is an affine expression, the execution count of this branch is
9182   // the minimum unsigned root of the following equation:
9183   //
9184   //     Start + Step*N = 0 (mod 2^BW)
9185   //
9186   // equivalent to:
9187   //
9188   //             Step*N = -Start (mod 2^BW)
9189   //
9190   // where BW is the common bit width of Start and Step.
9191 
9192   // Get the initial value for the loop.
9193   const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9194   const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9195 
9196   // For now we handle only constant steps.
9197   //
9198   // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9199   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to
9200   // wrap to 0; it must be counting down to equal 0. Consequently,
9201   // N = Start / -Step. We have not yet seen any such cases.
9202   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9203   if (!StepC || StepC->getValue()->isZero())
9204     return getCouldNotCompute();
9205 
9206   // For positive steps (counting up until unsigned overflow):
9207   //   N = -Start/Step (as unsigned)
9208   // For negative steps (counting down to zero):
9209   //   N = Start/-Step
9210   // First compute the unsigned distance from zero in the direction of Step.
9211   bool CountDown = StepC->getAPInt().isNegative();
9212   const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9213 
9214   // Handle unitary steps, which cannot wrap around.
9215   // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9216   //   N = Distance (as unsigned)
9217   if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9218     APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9219     APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9220     if (MaxBECountBase.ult(MaxBECount))
9221       MaxBECount = MaxBECountBase;
9222 
9223     // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
9224     // rotated, we end up with a loop whose backedge-taken count is n - 1.
9225     // Detect this case, and see if we can improve the bound.
9226     //
9227     // Explicitly handling this here is necessary because getUnsignedRange
9228     // isn't context-sensitive; it doesn't know that we only care about the
9229     // range inside the loop.
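    // Worked example (illustrative): with an i8 counter and a rotated loop
    // guarded by "n != 0", Distance is n - 1, so Distance + 1 is n. The guard
    // proves n != 0, and unsigned_max(n) - 1 = 254 then bounds the
    // backedge-taken count, improving on the unconditional bound of 255.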
9230 const SCEV *Zero = getZero(Distance->getType()); 9231 const SCEV *One = getOne(Distance->getType()); 9232 const SCEV *DistancePlusOne = getAddExpr(Distance, One); 9233 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) { 9234 // If Distance + 1 doesn't overflow, we can compute the maximum distance 9235 // as "unsigned_max(Distance + 1) - 1". 9236 ConstantRange CR = getUnsignedRange(DistancePlusOne); 9237 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1); 9238 } 9239 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates); 9240 } 9241 9242 // If the condition controls loop exit (the loop exits only if the expression 9243 // is true) and the addition is no-wrap we can use unsigned divide to 9244 // compute the backedge count. In this case, the step may not divide the 9245 // distance, but we don't care because if the condition is "missed" the loop 9246 // will have undefined behavior due to wrapping. 9247 if (ControlsExit && AddRec->hasNoSelfWrap() && 9248 loopHasNoAbnormalExits(AddRec->getLoop())) { 9249 const SCEV *Exact = 9250 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step); 9251 const SCEV *Max = 9252 Exact == getCouldNotCompute() 9253 ? Exact 9254 : getConstant(getUnsignedRangeMax(Exact)); 9255 return ExitLimit(Exact, Max, false, Predicates); 9256 } 9257 9258 // Solve the general equation. 9259 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(), 9260 getNegativeSCEV(Start), *this); 9261 const SCEV *M = E == getCouldNotCompute() 9262 ? E 9263 : getConstant(getUnsignedRangeMax(E)); 9264 return ExitLimit(E, M, false, Predicates); 9265 } 9266 9267 ScalarEvolution::ExitLimit 9268 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) { 9269 // Loops that look like: while (X == 0) are very strange indeed. We don't 9270 // handle them yet except for the trivial case. This could be expanded in the 9271 // future as needed. 9272 9273 // If the value is a constant, check to see if it is known to be non-zero 9274 // already. If so, the backedge will execute zero times. 9275 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 9276 if (!C->getValue()->isZero()) 9277 return getZero(C->getType()); 9278 return getCouldNotCompute(); // Otherwise it will loop infinitely. 9279 } 9280 9281 // We could implement others, but I really doubt anyone writes loops like 9282 // this, and if they did, they would already be constant folded. 9283 return getCouldNotCompute(); 9284 } 9285 9286 std::pair<const BasicBlock *, const BasicBlock *> 9287 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) 9288 const { 9289 // If the block has a unique predecessor, then there is no path from the 9290 // predecessor to the block that does not go through the direct edge 9291 // from the predecessor to the block. 9292 if (const BasicBlock *Pred = BB->getSinglePredecessor()) 9293 return {Pred, BB}; 9294 9295 // A loop's header is defined to be a block that dominates the loop. 9296 // If the header has a unique predecessor outside the loop, it must be 9297 // a block that has exactly one successor that can reach the loop. 
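  // For example (illustrative): when the header has a preheader, the
  // preheader is the loop predecessor, and every path into the loop from
  // outside runs through the preheader -> header edge, so conditions proven
  // on that edge hold on entry to the header.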
9298   if (const Loop *L = LI.getLoopFor(BB))
9299     return {L->getLoopPredecessor(), L->getHeader()};
9300 
9301   return {nullptr, nullptr};
9302 }
9303 
9304 /// SCEV structural equivalence is usually sufficient for testing whether two
9305 /// expressions are equal; however, for the purposes of looking for a condition
9306 /// guarding a loop, it can be useful to be a little more general, since a
9307 /// front-end may have replicated the controlling expression.
9308 static bool HasSameValue(const SCEV *A, const SCEV *B) {
9309   // Quick check to see if they are the same SCEV.
9310   if (A == B) return true;
9311 
9312   auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9313     // Not all instructions that are "identical" compute the same value. For
9314     // instance, two distinct alloca instructions allocating the same type are
9315     // identical and do not read memory, but compute distinct values.
9316     return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9317   };
9318 
9319   // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9320   // two different instructions with the same value. Check for this case.
9321   if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9322     if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9323       if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9324         if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9325           if (ComputesEqualValues(AI, BI))
9326             return true;
9327 
9328   // Otherwise assume they may have a different value.
9329   return false;
9330 }
9331 
9332 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9333                                            const SCEV *&LHS, const SCEV *&RHS,
9334                                            unsigned Depth) {
9335   bool Changed = false;
9336   // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9337   // '0 != 0'.
9338   auto TrivialCase = [&](bool TriviallyTrue) {
9339     LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9340     Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9341     return true;
9342   };
9343   // If we hit the max recursion limit, bail out.
9344   if (Depth >= 3)
9345     return false;
9346 
9347   // Canonicalize a constant to the right side.
9348   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9349     // Check for both operands constant.
9350     if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9351       if (ConstantExpr::getICmp(Pred,
9352                                 LHSC->getValue(),
9353                                 RHSC->getValue())->isNullValue())
9354         return TrivialCase(false);
9355       else
9356         return TrivialCase(true);
9357     }
9358     // Otherwise swap the operands to put the constant on the right.
9359     std::swap(LHS, RHS);
9360     Pred = ICmpInst::getSwappedPredicate(Pred);
9361     Changed = true;
9362   }
9363 
9364   // If we're comparing an addrec with a value which is loop-invariant in the
9365   // addrec's loop, put the addrec on the left. Also make a dominance check,
9366   // as both operands could be addrecs loop-invariant in each other's loop.
9367   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9368     const Loop *L = AR->getLoop();
9369     if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9370       std::swap(LHS, RHS);
9371       Pred = ICmpInst::getSwappedPredicate(Pred);
9372       Changed = true;
9373     }
9374   }
9375 
9376   // If there's a constant operand, canonicalize comparisons with boundary
9377   // cases, and canonicalize *-or-equal comparisons to regular comparisons.
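  // E.g. (illustrative): "x u>= 1" is recognized as the exact range [1, 0)
  // and converted to the equality "x != 0", while "x s<= 7" has no equality
  // form and instead becomes "x s< 8" in the switch below.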
9378 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 9379 const APInt &RA = RC->getAPInt(); 9380 9381 bool SimplifiedByConstantRange = false; 9382 9383 if (!ICmpInst::isEquality(Pred)) { 9384 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA); 9385 if (ExactCR.isFullSet()) 9386 return TrivialCase(true); 9387 else if (ExactCR.isEmptySet()) 9388 return TrivialCase(false); 9389 9390 APInt NewRHS; 9391 CmpInst::Predicate NewPred; 9392 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) && 9393 ICmpInst::isEquality(NewPred)) { 9394 // We were able to convert an inequality to an equality. 9395 Pred = NewPred; 9396 RHS = getConstant(NewRHS); 9397 Changed = SimplifiedByConstantRange = true; 9398 } 9399 } 9400 9401 if (!SimplifiedByConstantRange) { 9402 switch (Pred) { 9403 default: 9404 break; 9405 case ICmpInst::ICMP_EQ: 9406 case ICmpInst::ICMP_NE: 9407 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. 9408 if (!RA) 9409 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) 9410 if (const SCEVMulExpr *ME = 9411 dyn_cast<SCEVMulExpr>(AE->getOperand(0))) 9412 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && 9413 ME->getOperand(0)->isAllOnesValue()) { 9414 RHS = AE->getOperand(1); 9415 LHS = ME->getOperand(1); 9416 Changed = true; 9417 } 9418 break; 9419 9420 9421 // The "Should have been caught earlier!" messages refer to the fact 9422 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above 9423 // should have fired on the corresponding cases, and canonicalized the 9424 // check to trivial case. 9425 9426 case ICmpInst::ICMP_UGE: 9427 assert(!RA.isMinValue() && "Should have been caught earlier!"); 9428 Pred = ICmpInst::ICMP_UGT; 9429 RHS = getConstant(RA - 1); 9430 Changed = true; 9431 break; 9432 case ICmpInst::ICMP_ULE: 9433 assert(!RA.isMaxValue() && "Should have been caught earlier!"); 9434 Pred = ICmpInst::ICMP_ULT; 9435 RHS = getConstant(RA + 1); 9436 Changed = true; 9437 break; 9438 case ICmpInst::ICMP_SGE: 9439 assert(!RA.isMinSignedValue() && "Should have been caught earlier!"); 9440 Pred = ICmpInst::ICMP_SGT; 9441 RHS = getConstant(RA - 1); 9442 Changed = true; 9443 break; 9444 case ICmpInst::ICMP_SLE: 9445 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!"); 9446 Pred = ICmpInst::ICMP_SLT; 9447 RHS = getConstant(RA + 1); 9448 Changed = true; 9449 break; 9450 } 9451 } 9452 } 9453 9454 // Check for obvious equality. 9455 if (HasSameValue(LHS, RHS)) { 9456 if (ICmpInst::isTrueWhenEqual(Pred)) 9457 return TrivialCase(true); 9458 if (ICmpInst::isFalseWhenEqual(Pred)) 9459 return TrivialCase(false); 9460 } 9461 9462 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 9463 // adding or subtracting 1 from one of the operands. 
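  // E.g. (illustrative): "x s<= y" becomes "x s< y + 1" when y's signed range
  // excludes SINT_MAX (so the increment cannot overflow); failing that,
  // "x - 1 s< y" is tried when x's signed range excludes SINT_MIN.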
9464 switch (Pred) { 9465 case ICmpInst::ICMP_SLE: 9466 if (!getSignedRangeMax(RHS).isMaxSignedValue()) { 9467 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9468 SCEV::FlagNSW); 9469 Pred = ICmpInst::ICMP_SLT; 9470 Changed = true; 9471 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) { 9472 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 9473 SCEV::FlagNSW); 9474 Pred = ICmpInst::ICMP_SLT; 9475 Changed = true; 9476 } 9477 break; 9478 case ICmpInst::ICMP_SGE: 9479 if (!getSignedRangeMin(RHS).isMinSignedValue()) { 9480 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 9481 SCEV::FlagNSW); 9482 Pred = ICmpInst::ICMP_SGT; 9483 Changed = true; 9484 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) { 9485 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9486 SCEV::FlagNSW); 9487 Pred = ICmpInst::ICMP_SGT; 9488 Changed = true; 9489 } 9490 break; 9491 case ICmpInst::ICMP_ULE: 9492 if (!getUnsignedRangeMax(RHS).isMaxValue()) { 9493 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 9494 SCEV::FlagNUW); 9495 Pred = ICmpInst::ICMP_ULT; 9496 Changed = true; 9497 } else if (!getUnsignedRangeMin(LHS).isMinValue()) { 9498 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS); 9499 Pred = ICmpInst::ICMP_ULT; 9500 Changed = true; 9501 } 9502 break; 9503 case ICmpInst::ICMP_UGE: 9504 if (!getUnsignedRangeMin(RHS).isMinValue()) { 9505 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS); 9506 Pred = ICmpInst::ICMP_UGT; 9507 Changed = true; 9508 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) { 9509 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 9510 SCEV::FlagNUW); 9511 Pred = ICmpInst::ICMP_UGT; 9512 Changed = true; 9513 } 9514 break; 9515 default: 9516 break; 9517 } 9518 9519 // TODO: More simplifications are possible here. 9520 9521 // Recursively simplify until we either hit a recursion limit or nothing 9522 // changes. 9523 if (Changed) 9524 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1); 9525 9526 return Changed; 9527 } 9528 9529 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 9530 return getSignedRangeMax(S).isNegative(); 9531 } 9532 9533 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 9534 return getSignedRangeMin(S).isStrictlyPositive(); 9535 } 9536 9537 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 9538 return !getSignedRangeMin(S).isNegative(); 9539 } 9540 9541 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 9542 return !getSignedRangeMax(S).isStrictlyPositive(); 9543 } 9544 9545 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 9546 return isKnownNegative(S) || isKnownPositive(S); 9547 } 9548 9549 std::pair<const SCEV *, const SCEV *> 9550 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) { 9551 // Compute SCEV on entry of loop L. 9552 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this); 9553 if (Start == getCouldNotCompute()) 9554 return { Start, Start }; 9555 // Compute post increment SCEV for loop L. 9556 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this); 9557 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute"); 9558 return { Start, PostInc }; 9559 } 9560 9561 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred, 9562 const SCEV *LHS, const SCEV *RHS) { 9563 // First collect all loops. 
9564   SmallPtrSet<const Loop *, 8> LoopsUsed;
9565   getUsedLoops(LHS, LoopsUsed);
9566   getUsedLoops(RHS, LoopsUsed);
9567 
9568   if (LoopsUsed.empty())
9569     return false;
9570 
9571   // Domination relationship must be a linear order on collected loops.
9572 #ifndef NDEBUG
9573   for (auto *L1 : LoopsUsed)
9574     for (auto *L2 : LoopsUsed)
9575       assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9576               DT.dominates(L2->getHeader(), L1->getHeader())) &&
9577              "Domination relationship is not a linear order");
9578 #endif
9579 
9580   const Loop *MDL =
9581       *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9582                         [&](const Loop *L1, const Loop *L2) {
9583                           return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9584                         });
9585 
9586   // Get init and post increment value for LHS.
9587   auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9588   // If LHS contains an unknown non-invariant SCEV then bail out.
9589   if (SplitLHS.first == getCouldNotCompute())
9590     return false;
9591   assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9592   // Get init and post increment value for RHS.
9593   auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9594   // If RHS contains an unknown non-invariant SCEV then bail out.
9595   if (SplitRHS.first == getCouldNotCompute())
9596     return false;
9597   assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9598   // It is possible that the init SCEV contains an invariant load but it does
9599   // not dominate MDL and is not available at MDL loop entry, so we should
9600   // check it here.
9601   if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9602       !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9603     return false;
9604 
9605   // It seems the backedge guard check is faster than the entry one, so in
9606   // some cases it can speed up the whole estimation by short-circuiting.
9607   return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9608                                      SplitRHS.second) &&
9609          isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9610 }
9611 
9612 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9613                                        const SCEV *LHS, const SCEV *RHS) {
9614   // Canonicalize the inputs first.
9615   (void)SimplifyICmpOperands(Pred, LHS, RHS);
9616 
9617   if (isKnownViaInduction(Pred, LHS, RHS))
9618     return true;
9619 
9620   if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9621     return true;
9622 
9623   // Otherwise see what can be done with some simple reasoning.
9624   return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9625 }
9626 
9627 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
9628                                                   const SCEV *LHS,
9629                                                   const SCEV *RHS) {
9630   if (isKnownPredicate(Pred, LHS, RHS))
9631     return true;
9632   else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
9633     return false;
9634   return None;
9635 }
9636 
9637 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9638                                          const SCEV *LHS, const SCEV *RHS,
9639                                          const Instruction *Context) {
9640   // TODO: Analyze guards and assumes from Context's block.
9641   return isKnownPredicate(Pred, LHS, RHS) ||
9642          isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
9643 }
9644 
9645 Optional<bool>
9646 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
9647                                      const SCEV *RHS,
9648                                      const Instruction *Context) {
9649   Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
9650   if (KnownWithoutContext)
9651     return KnownWithoutContext;
9652 
9653   if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS))
9654     return true;
9655   else if (isBasicBlockEntryGuardedByCond(Context->getParent(),
9656                                           ICmpInst::getInversePredicate(Pred),
9657                                           LHS, RHS))
9658     return false;
9659   return None;
9660 }
9661 
9662 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
9663                                               const SCEVAddRecExpr *LHS,
9664                                               const SCEV *RHS) {
9665   const Loop *L = LHS->getLoop();
9666   return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
9667          isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
9668 }
9669 
9670 Optional<ScalarEvolution::MonotonicPredicateType>
9671 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
9672                                            ICmpInst::Predicate Pred) {
9673   auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
9674 
9675 #ifndef NDEBUG
9676   // Verify an invariant: swapping the predicate should turn a monotonically
9677   // increasing change into a monotonically decreasing one, and vice versa.
9678   if (Result) {
9679     auto ResultSwapped =
9680         getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
9681 
9682     assert(ResultSwapped.hasValue() && "should be able to analyze both!");
9683     assert(ResultSwapped.getValue() != Result.getValue() &&
9684            "monotonicity should flip as we flip the predicate");
9685   }
9686 #endif
9687 
9688   return Result;
9689 }
9690 
9691 Optional<ScalarEvolution::MonotonicPredicateType>
9692 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
9693                                                ICmpInst::Predicate Pred) {
9694   // A zero step value for LHS means the induction variable is essentially a
9695   // loop invariant value. We don't really depend on the predicate actually
9696   // flipping from false to true (for increasing predicates, and the other way
9697   // around for decreasing predicates); all we care about is that *if* the
9698   // predicate changes then it only changes from false to true.
9699   //
9700   // A zero step value in itself is not very useful, but there may be places
9701   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
9702   // as general as possible.
9703 
9704   // Only handle LE/LT/GE/GT predicates.
9705   if (!ICmpInst::isRelational(Pred))
9706     return None;
9707 
9708   bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
9709   assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
9710          "Should be greater or less!");
9711 
9712   // Check that AR does not wrap.
9713   if (ICmpInst::isUnsigned(Pred)) {
9714     if (!LHS->hasNoUnsignedWrap())
9715       return None;
9716     return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9717   } else {
9718     assert(ICmpInst::isSigned(Pred) &&
9719            "Relational predicate is either signed or unsigned!");
9720     if (!LHS->hasNoSignedWrap())
9721       return None;
9722 
9723     const SCEV *Step = LHS->getStepRecurrence(*this);
9724 
9725     if (isKnownNonNegative(Step))
9726       return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9727 
9728     if (isKnownNonPositive(Step))
9729       return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9730 
9731     return None;
9732   }
9733 }
9734 
9735 Optional<ScalarEvolution::LoopInvariantPredicate>
9736 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
9737                                            const SCEV *LHS, const SCEV *RHS,
9738                                            const Loop *L) {
9739 
9740   // If one operand is loop-invariant, force it into the RHS; otherwise bail out.
9741   if (!isLoopInvariant(RHS, L)) {
9742     if (!isLoopInvariant(LHS, L))
9743       return None;
9744 
9745     std::swap(LHS, RHS);
9746     Pred = ICmpInst::getSwappedPredicate(Pred);
9747   }
9748 
9749   const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
9750   if (!ArLHS || ArLHS->getLoop() != L)
9751     return None;
9752 
9753   auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
9754   if (!MonotonicType)
9755     return None;
9756   // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
9757   // true as the loop iterates, and the backedge is control dependent on
9758   // "ArLHS `Pred` RHS" == true then we can reason as follows:
9759   //
9760   //   * if the predicate was false in the first iteration then the predicate
9761   //     is never evaluated again, since the loop exits without taking the
9762   //     backedge.
9763   //   * if the predicate was true in the first iteration then it will
9764   //     continue to be true for all future iterations since it is
9765   //     monotonically increasing.
9766   //
9767   // For both the above possibilities, we can replace the loop varying
9768   // predicate with its value on the first iteration of the loop (which is
9769   // loop invariant).
9770   //
9771   // A similar reasoning applies for a monotonically decreasing predicate, by
9772   // replacing true with false and false with true in the above two bullets.
9773   bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
9774   auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
9775 
9776   if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
9777     return None;
9778 
9779   return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
9780 }
9781 
9782 Optional<ScalarEvolution::LoopInvariantPredicate>
9783 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
9784     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
9785     const Instruction *Context, const SCEV *MaxIter) {
9786   // Try to prove the following set of facts:
9787   // - The predicate is monotonic in the iteration space.
9788   // - If the check does not fail on the 1st iteration:
9789   //   - No overflow will happen during first MaxIter iterations;
9790   //   - It will not fail on the MaxIter'th iteration.
9791   // If the check does fail on the 1st iteration, we leave the loop and no
9792   // other checks matter.
9793 
9794   // If one operand is loop-invariant, force it into the RHS; otherwise bail out.
9795   if (!isLoopInvariant(RHS, L)) {
9796     if (!isLoopInvariant(LHS, L))
9797       return None;
9798 
9799     std::swap(LHS, RHS);
9800     Pred = ICmpInst::getSwappedPredicate(Pred);
9801   }
9802 
9803   auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
9804   if (!AR || AR->getLoop() != L)
9805     return None;
9806 
9807   // The predicate must be relational (i.e. <, <=, >=, >).
9808   if (!ICmpInst::isRelational(Pred))
9809     return None;
9810 
9811   // TODO: Support steps other than +/- 1.
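  // Sketch of the no-wrap argument (illustrative): for {Start,+,1} and
  // MaxIter = m, Last = Start + m. If additionally Start u<= Last (or s<=,
  // matching the signedness of Pred), the recurrence moves from Start to Last
  // without wrapping, so a fact proven at the backedge for Last, together
  // with the first-iteration check, covers all of the first m iterations.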
9812   const SCEV *Step = AR->getStepRecurrence(*this);
9813   auto *One = getOne(Step->getType());
9814   auto *MinusOne = getNegativeSCEV(One);
9815   if (Step != One && Step != MinusOne)
9816     return None;
9817 
9818   // Type mismatch here means that MaxIter is potentially larger than the max
9819   // unsigned value of the start type, which means we cannot prove no-wrap for
9820   // the indvar.
9821   if (AR->getType() != MaxIter->getType())
9822     return None;
9823 
9824   // Value of IV on suggested last iteration.
9825   const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
9826   // Does it still meet the requirement?
9827   if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
9828     return None;
9829   // Because the step is +/- 1 and MaxIter has the same type as Start (i.e. it
9830   // does not exceed the max unsigned value of this type), this effectively
9831   // proves that there is no wrap during the iteration. To prove that there is
9832   // no signed/unsigned wrap, we need to check that
9833   // Start <= Last for step = 1 or Start >= Last for step = -1.
9834   ICmpInst::Predicate NoOverflowPred =
9835       CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
9836   if (Step == MinusOne)
9837     NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
9838   const SCEV *Start = AR->getStart();
9839   if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
9840     return None;
9841 
9842   // Everything is fine.
9843   return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
9844 }
9845 
9846 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
9847     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
9848   if (HasSameValue(LHS, RHS))
9849     return ICmpInst::isTrueWhenEqual(Pred);
9850 
9851   // This code is split out from isKnownPredicate because it is called from
9852   // within isLoopEntryGuardedByCond.
9853 
9854   auto CheckRanges = [&](const ConstantRange &RangeLHS,
9855                          const ConstantRange &RangeRHS) {
9856     return RangeLHS.icmp(Pred, RangeRHS);
9857   };
9858 
9859   // The check at the top of the function catches the case where the values
9860   // are known to be equal.
9861   if (Pred == CmpInst::ICMP_EQ)
9862     return false;
9863 
9864   if (Pred == CmpInst::ICMP_NE)
9865     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
9866            CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
9867            isKnownNonZero(getMinusSCEV(LHS, RHS));
9868 
9869   if (CmpInst::isSigned(Pred))
9870     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
9871 
9872   return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
9873 }
9874 
9875 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
9876                                                     const SCEV *LHS,
9877                                                     const SCEV *RHS) {
9878   // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
9879   // Return Y via OutY.
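  // For example (illustrative): matching RHS = (X + 5)<nsw> against X sets
  // OutY = 5, and since 5 is non-negative the ICMP_SLE case below concludes
  // that X s<= (X + 5)<nsw> holds.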
9880 auto MatchBinaryAddToConst = 9881 [this](const SCEV *Result, const SCEV *X, APInt &OutY, 9882 SCEV::NoWrapFlags ExpectedFlags) { 9883 const SCEV *NonConstOp, *ConstOp; 9884 SCEV::NoWrapFlags FlagsPresent; 9885 9886 if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) || 9887 !isa<SCEVConstant>(ConstOp) || NonConstOp != X) 9888 return false; 9889 9890 OutY = cast<SCEVConstant>(ConstOp)->getAPInt(); 9891 return (FlagsPresent & ExpectedFlags) == ExpectedFlags; 9892 }; 9893 9894 APInt C; 9895 9896 switch (Pred) { 9897 default: 9898 break; 9899 9900 case ICmpInst::ICMP_SGE: 9901 std::swap(LHS, RHS); 9902 LLVM_FALLTHROUGH; 9903 case ICmpInst::ICMP_SLE: 9904 // X s<= (X + C)<nsw> if C >= 0 9905 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative()) 9906 return true; 9907 9908 // (X + C)<nsw> s<= X if C <= 0 9909 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && 9910 !C.isStrictlyPositive()) 9911 return true; 9912 break; 9913 9914 case ICmpInst::ICMP_SGT: 9915 std::swap(LHS, RHS); 9916 LLVM_FALLTHROUGH; 9917 case ICmpInst::ICMP_SLT: 9918 // X s< (X + C)<nsw> if C > 0 9919 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && 9920 C.isStrictlyPositive()) 9921 return true; 9922 9923 // (X + C)<nsw> s< X if C < 0 9924 if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative()) 9925 return true; 9926 break; 9927 9928 case ICmpInst::ICMP_UGE: 9929 std::swap(LHS, RHS); 9930 LLVM_FALLTHROUGH; 9931 case ICmpInst::ICMP_ULE: 9932 // X u<= (X + C)<nuw> for any C 9933 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW)) 9934 return true; 9935 break; 9936 9937 case ICmpInst::ICMP_UGT: 9938 std::swap(LHS, RHS); 9939 LLVM_FALLTHROUGH; 9940 case ICmpInst::ICMP_ULT: 9941 // X u< (X + C)<nuw> if C != 0 9942 if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue()) 9943 return true; 9944 break; 9945 } 9946 9947 return false; 9948 } 9949 9950 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 9951 const SCEV *LHS, 9952 const SCEV *RHS) { 9953 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 9954 return false; 9955 9956 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 9957 // the stack can result in exponential time complexity. 9958 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 9959 9960 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 9961 // 9962 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 9963 // isKnownPredicate. isKnownPredicate is more powerful, but also more 9964 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 9965 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 9966 // use isKnownPredicate later if needed. 9967 return isKnownNonNegative(RHS) && 9968 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) && 9969 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS); 9970 } 9971 9972 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB, 9973 ICmpInst::Predicate Pred, 9974 const SCEV *LHS, const SCEV *RHS) { 9975 // No need to even try if we know the module has no guards. 
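  // (Guards are calls of the form
  //   "call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"(...) ]";
  // execution only continues past the call when %cond holds, which is what
  // lets a guard found in BB imply %cond for the check below.)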
9976   if (!HasGuards)
9977     return false;
9978 
9979   return any_of(*BB, [&](const Instruction &I) {
9980     using namespace llvm::PatternMatch;
9981 
9982     Value *Condition;
9983     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
9984                          m_Value(Condition))) &&
9985            isImpliedCond(Pred, LHS, RHS, Condition, false);
9986   });
9987 }
9988 
9989 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
9990 /// protected by a conditional between LHS and RHS. This is used to
9991 /// eliminate casts.
9992 bool
9993 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
9994                                              ICmpInst::Predicate Pred,
9995                                              const SCEV *LHS, const SCEV *RHS) {
9996   // Interpret a null as meaning no loop, where there is obviously no guard
9997   // (interprocedural conditions notwithstanding).
9998   if (!L) return true;
9999 
10000   if (VerifyIR)
10001     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10002            "This cannot be done on broken IR!");
10003 
10004 
10005   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10006     return true;
10007 
10008   BasicBlock *Latch = L->getLoopLatch();
10009   if (!Latch)
10010     return false;
10011 
10012   BranchInst *LoopContinuePredicate =
10013       dyn_cast<BranchInst>(Latch->getTerminator());
10014   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10015       isImpliedCond(Pred, LHS, RHS,
10016                     LoopContinuePredicate->getCondition(),
10017                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10018     return true;
10019 
10020   // We don't want more than one activation of the following loops on the stack
10021   // -- that can lead to O(n!) time complexity.
10022   if (WalkingBEDominatingConds)
10023     return false;
10024 
10025   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10026 
10027   // See if we can exploit a trip count to prove the predicate.
10028   const auto &BETakenInfo = getBackedgeTakenInfo(L);
10029   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10030   if (LatchBECount != getCouldNotCompute()) {
10031     // We know that Latch branches back to the loop header exactly
10032     // LatchBECount times. This means the backedge condition at Latch is
10033     // equivalent to  "{0,+,1} u< LatchBECount".
10034     Type *Ty = LatchBECount->getType();
10035     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10036     const SCEV *LoopCounter =
10037         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10038     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10039                       LatchBECount))
10040       return true;
10041   }
10042 
10043   // Check conditions due to any @llvm.assume intrinsics.
10044   for (auto &AssumeVH : AC.assumptions()) {
10045     if (!AssumeVH)
10046       continue;
10047     auto *CI = cast<CallInst>(AssumeVH);
10048     if (!DT.dominates(CI, Latch->getTerminator()))
10049       continue;
10050 
10051     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10052       return true;
10053   }
10054 
10055   // If the loop is not reachable from the entry block, we risk running into an
10056   // infinite loop as we walk up into the dom tree. These loops do not matter
10057   // anyway, so we just return a conservative answer when we see them.
10058 if (!DT.isReachableFromEntry(L->getHeader())) 10059 return false; 10060 10061 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10062 return true; 10063 10064 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10065 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10066 assert(DTN && "should reach the loop header before reaching the root!"); 10067 10068 BasicBlock *BB = DTN->getBlock(); 10069 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10070 return true; 10071 10072 BasicBlock *PBB = BB->getSinglePredecessor(); 10073 if (!PBB) 10074 continue; 10075 10076 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10077 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10078 continue; 10079 10080 Value *Condition = ContinuePredicate->getCondition(); 10081 10082 // If we have an edge `E` within the loop body that dominates the only 10083 // latch, the condition guarding `E` also guards the backedge. This 10084 // reasoning works only for loops with a single latch. 10085 10086 BasicBlockEdge DominatingEdge(PBB, BB); 10087 if (DominatingEdge.isSingleEdge()) { 10088 // We're constructively (and conservatively) enumerating edges within the 10089 // loop body that dominate the latch. The dominator tree better agree 10090 // with us on this: 10091 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10092 10093 if (isImpliedCond(Pred, LHS, RHS, Condition, 10094 BB != ContinuePredicate->getSuccessor(0))) 10095 return true; 10096 } 10097 } 10098 10099 return false; 10100 } 10101 10102 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10103 ICmpInst::Predicate Pred, 10104 const SCEV *LHS, 10105 const SCEV *RHS) { 10106 if (VerifyIR) 10107 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10108 "This cannot be done on broken IR!"); 10109 10110 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10111 // the facts (a >= b && a != b) separately. A typical situation is when the 10112 // non-strict comparison is known from ranges and non-equality is known from 10113 // dominating predicates. If we are proving strict comparison, we always try 10114 // to prove non-equality and non-strict comparison separately. 10115 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10116 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10117 bool ProvedNonStrictComparison = false; 10118 bool ProvedNonEquality = false; 10119 10120 auto SplitAndProve = 10121 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10122 if (!ProvedNonStrictComparison) 10123 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10124 if (!ProvedNonEquality) 10125 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10126 if (ProvedNonStrictComparison && ProvedNonEquality) 10127 return true; 10128 return false; 10129 }; 10130 10131 if (ProvingStrictComparison) { 10132 auto ProofFn = [&](ICmpInst::Predicate P) { 10133 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10134 }; 10135 if (SplitAndProve(ProofFn)) 10136 return true; 10137 } 10138 10139 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
10140   auto ProveViaGuard = [&](const BasicBlock *Block) {
10141     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10142       return true;
10143     if (ProvingStrictComparison) {
10144       auto ProofFn = [&](ICmpInst::Predicate P) {
10145         return isImpliedViaGuard(Block, P, LHS, RHS);
10146       };
10147       if (SplitAndProve(ProofFn))
10148         return true;
10149     }
10150     return false;
10151   };
10152 
10153   // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10154   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10155     const Instruction *Context = &BB->front();
10156     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10157       return true;
10158     if (ProvingStrictComparison) {
10159       auto ProofFn = [&](ICmpInst::Predicate P) {
10160         return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
10161       };
10162       if (SplitAndProve(ProofFn))
10163         return true;
10164     }
10165     return false;
10166   };
10167 
10168   // Starting at the block's predecessor, climb up the predecessor chain, as
10169   // long as each predecessor found has a unique successor leading to the
10170   // original block.
10171   const Loop *ContainingLoop = LI.getLoopFor(BB);
10172   const BasicBlock *PredBB;
10173   if (ContainingLoop && ContainingLoop->getHeader() == BB)
10174     PredBB = ContainingLoop->getLoopPredecessor();
10175   else
10176     PredBB = BB->getSinglePredecessor();
10177   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10178        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10179     if (ProveViaGuard(Pair.first))
10180       return true;
10181 
10182     const BranchInst *LoopEntryPredicate =
10183         dyn_cast<BranchInst>(Pair.first->getTerminator());
10184     if (!LoopEntryPredicate ||
10185         LoopEntryPredicate->isUnconditional())
10186       continue;
10187 
10188     if (ProveViaCond(LoopEntryPredicate->getCondition(),
10189                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
10190       return true;
10191   }
10192 
10193   // Check conditions due to any @llvm.assume intrinsics.
10194   for (auto &AssumeVH : AC.assumptions()) {
10195     if (!AssumeVH)
10196       continue;
10197     auto *CI = cast<CallInst>(AssumeVH);
10198     if (!DT.dominates(CI, BB))
10199       continue;
10200 
10201     if (ProveViaCond(CI->getArgOperand(0), false))
10202       return true;
10203   }
10204 
10205   return false;
10206 }
10207 
10208 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10209                                                ICmpInst::Predicate Pred,
10210                                                const SCEV *LHS,
10211                                                const SCEV *RHS) {
10212   // Interpret a null as meaning no loop, where there is obviously no guard
10213   // (interprocedural conditions notwithstanding).
10214   if (!L)
10215     return false;
10216 
10217   // Both LHS and RHS must be available at loop entry.
10218   assert(isAvailableAtLoopEntry(LHS, L) &&
10219          "LHS is not available at Loop Entry");
10220   assert(isAvailableAtLoopEntry(RHS, L) &&
10221          "RHS is not available at Loop Entry");
10222 
10223   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10224     return true;
10225 
10226   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10227 }
10228 
10229 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10230                                     const SCEV *RHS,
10231                                     const Value *FoundCondValue, bool Inverse,
10232                                     const Instruction *Context) {
10233   // A false condition implies anything. Do not bother analyzing it further.
10234   if (FoundCondValue ==
10235       ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10236     return true;
10237 
10238   if (!PendingLoopPredicates.insert(FoundCondValue).second)
10239     return false;
10240 
10241   auto ClearOnExit =
10242       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10243 
10244   // Recursively handle And and Or conditions.
10245   const Value *Op0, *Op1;
10246   if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10247     if (!Inverse)
10248       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10249              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10250   } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10251     if (Inverse)
10252       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10253              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10254   }
10255 
10256   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10257   if (!ICI) return false;
10258 
10259   // Now that we have found a conditional branch that dominates the loop or
10260   // controls the loop latch, check whether it is the comparison we are looking for.
10261   ICmpInst::Predicate FoundPred;
10262   if (Inverse)
10263     FoundPred = ICI->getInversePredicate();
10264   else
10265     FoundPred = ICI->getPredicate();
10266 
10267   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10268   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10269 
10270   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10271 }
10272 
10273 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10274                                     const SCEV *RHS,
10275                                     ICmpInst::Predicate FoundPred,
10276                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
10277                                     const Instruction *Context) {
10278   // Balance the types.
10279   if (getTypeSizeInBits(LHS->getType()) <
10280       getTypeSizeInBits(FoundLHS->getType())) {
10281     // For unsigned and equality predicates, try to prove that both found
10282     // operands fit into a narrow unsigned range. If so, try to prove facts in
10283     // narrow types.
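    // E.g. (illustrative): if LHS/RHS are i8 and FoundLHS/FoundRHS are i16
    // values both known u<= 255, then FoundLHS/FoundRHS are faithfully
    // represented by their i8 truncations, and an unsigned or equality fact
    // proven in i8 carries the same meaning as the original i16 fact.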
    if (!CmpInst::isSigned(FoundPred)) {
      auto *NarrowType = LHS->getType();
      auto *WideType = FoundLHS->getType();
      auto BitWidth = getTypeSizeInBits(NarrowType);
      const SCEV *MaxValue = getZeroExtendExpr(
          getConstant(APInt::getMaxValue(BitWidth)), WideType);
      if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
          isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
        const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
        const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
        if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
                                       TruncFoundRHS, Context))
          return true;
      }
    }

    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }
  return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
                                    FoundRHS, Context);
}

bool ScalarEvolution::isImpliedCondBalancedTypes(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
    const Instruction *Context) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(FoundLHS->getType()) &&
         "Types should be balanced!");
  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    // We can write the implication
    // 0.  LHS Pred      RHS  <-   FoundLHS SwapPred  FoundRHS
    // using one of the following ways:
    // 1.  LHS Pred      RHS  <-   FoundRHS Pred      FoundLHS
    // 2.  RHS SwapPred  LHS  <-   FoundLHS SwapPred  FoundRHS
    // 3.  LHS Pred      RHS  <-  ~FoundLHS Pred     ~FoundRHS
    // 4. ~LHS SwapPred ~RHS  <-   FoundLHS SwapPred  FoundRHS
    // Forms 1. and 2. require swapping the operands of one condition. Don't
    // do this if it would break canonical constant/addrec ordering.
    if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
      return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
                                   Context);
    if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);

    // There's no clear preference between forms 3. and 4., try both.
    return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
                                 FoundLHS, FoundRHS, Context) ||
           isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
                                 getNotSCEV(FoundRHS), Context);
  }

  // An unsigned comparison is the same as a signed comparison when both
  // operands are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t). The
    // range we consider has to correspond to the same signedness as the
    // predicate we're interested in folding.
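    // For example, if the known unsigned range of V is [5, 100) and the guard
    // gives V != 5, we may reason as if V were in [6, 100): a sharper bound
    // such as V u>= 6 becomes available for the checks below.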
    APInt Min = ICmpInst::isSigned(Pred) ? getSignedRangeMin(V)
                                         : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin. If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
                                  Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min). We know from the guarding condition that !(V
        // == Min). This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //    => V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.

        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
                                  Context))
          return true;
        break;

      // `LHS < RHS` and `LHS <= RHS` are handled in the same way as
      // `RHS > LHS` and `RHS >= LHS` respectively.
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_ULE:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(SharperMin), Context))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_ULT:
        if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
                                  LHS, V, getConstant(Min), Context))
          return true;
        break;

      default:
        // No change
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
                                Context))
        return true;

  // Otherwise assume the worst.
  return false;
}

bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}
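
// For example, computeConstantDifference((4 + %x), %x) returns 4, and the
// addrec pair {(4 + %x),+,1} vs {%x,+,1} over the same loop peels down to the
// same starts and again returns 4. (Illustrative values; the cases handled
// are exactly the shapes matched below.)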
Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  // X - X = 0.
  if (More == Less)
    return APInt(getTypeSizeInBits(More->getType()), 0);

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}
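
// For example, if FoundLHS is the addrec {10,+,1} and some block that runs on
// every iteration of its loop establishes {10,+,1} u< %n, then that fact
// already holds on the first iteration, so 10 u< %n holds at loop entry.
// (An illustrative instance of the pattern recognized below.)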
bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
  // Try to recognize the following pattern:
  //
  //   FoundRHS = ...
  //   ...
  //   loop:
  //     FoundLHS = {Start,+,W}
  //   context_bb: // Basic block from the same loop
  //     known(Pred, FoundLHS, FoundRHS)
  //
  // If some predicate is known in the context of a loop, it is also known on
  // each iteration of this loop, including the first iteration. Therefore, in
  // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
  // prove the original predicate using this fact.
  if (!Context)
    return false;
  const BasicBlock *ContextBB = Context->getParent();
  // Make sure AR varies in the context block.
  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that the context belongs to the loop and executes on the 1st
    // iteration (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
  }

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
    const Loop *L = AR->getLoop();
    // Make sure that the context belongs to the loop and executes on the 1st
    // iteration (if it ever executes at all).
    if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
      return false;
    if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
      return false;
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
  }

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop. This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                   ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //       FoundLHS s< FoundRHS s< INT_MIN - C
  //  <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //  <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //  <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //       (FoundRHS + INT_MIN + C + INT_MIN)                   [ using (3) ]
  //  <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}
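
// If LHS (or, after the swap below, RHS) is a SCEVUnknown Phi, prove the
// desired predicate by proving it separately for every incoming value of that
// Phi. For example, to show "%phi u< %n" for %phi = phi [ 0, %preheader ],
// [ %x, %latch ], it suffices to show 0 u< %n and %x u< %n. (A summary of the
// case analysis performed below.)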
bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not already pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return the
      // conservative answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and the predicate is true
    // for each pair of incoming values, then the predicate is also true for
    // the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both AddRec and Unknown
    // PHIs; for it we can compare the incoming values of the AddRec from above
    // the loop and from the latch with the respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
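    // For example, with RHS = {0,+,1} and LPhi = phi [ %a, %preheader ],
    // [ %b, %latch ], it is enough to check %a against the start value 0 and
    // %b against the post-increment value {1,+,1}. (Illustrative names.)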
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to
    // RHS, the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}

bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS,
                                            const Instruction *Context) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
                                          Context))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS);
}

/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return is_contained(MinMaxExpr->operands(), Candidate);
}

static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
    SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}
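
// For example, {0,+,2}<nuw> u< {1,+,2}<nuw> over the same loop reduces to the
// start comparison 0 u< 1: both recurrences advance in lockstep and neither
// wraps, so the initial ordering is preserved on every iteration.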
/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}
bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;

  // We only want to work with GT comparison so far.
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }

  // For unsigned, try to reduce it to the corresponding signed comparison.
  if (Pred == ICmpInst::ICMP_UGT)
    // We can replace an unsigned predicate with its signed counterpart if all
    // involved values are non-negative.
    // TODO: We could have better support for unsigned.
    if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
      // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
      // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
      // use this fact to prove that LHS and RHS are non-negative.
      const SCEV *MinusOne = getMinusOne(LHS->getType());
      if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
                                FoundRHS) &&
          isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
                                FoundRHS))
        Pred = ICmpInst::ICMP_SGT;
    }

  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Whether the SGT predicate can be proved trivially or by using the found
  // context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getMinusOne(RHS->getType());

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop. This would
      // be cached as SCEVCouldNotCompute to avoid the infinite recursion. To
      // avoid this, we only want to create SCEVs that are constants in this
      // section. So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and the other one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3; if we
      // divide it by a Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return LHSRange.icmp(Pred, ConstRHS);
}
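
// For example, for an i8 unsigned IV with getUnsignedRangeMax(RHS) == 250 and
// a stride of at most 10, the final increment may step from a value just below
// 250 past 255 and wrap: 250 u> 255 - 9 == 246, so we must report possible
// overflow. (Illustrative numbers for the check below.)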
bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}

bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
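
// With Equality == false this computes ceil(Delta / Step), i.e.
// (Delta + (Step - 1)) /u Step; for example, Delta = 10 and Step = 4 give
// (10 + 3) /u 4 = 3. With Equality == true it computes (Delta + Step) /u Step,
// which adds one more iteration when Step divides Delta evenly.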
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta,
                                            const SCEV *Step, bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}

const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}
ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
    //
    // Precondition b) implies that the unknown stride cannot be zero otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for(i=127; i<128; i+=129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing to optimize in presence of
    // undefined behaviors like the case of C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the
  // end bound of the loop (RHS), and the fact that IV does not overflow (which
  // is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    // If we know that RHS >= Start in the context of loop, then we know that
    // max(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start))
      End = RHS;
    else
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing to optimize in presence of
  // undefined behaviors like the case of C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
    // If we know that Start >= RHS in the context of loop, then we know that
    // min(RHS, Start) = RHS at this point.
    if (isLoopEntryGuardedByCond(
            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
      End = RHS;
    else
      End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
  }

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}

const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(operands());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero. If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
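    // For example, solving {0,+,3} in the range [0, 10) gives End = 9 and an
    // exit value of (9 + 3) /u 3 = 4: the recurrence evaluates to 12 at
    // iteration 4, the first value outside the range.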
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range. This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
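  // For example, the post-increment value of the affine recurrence {5,+,3}
  // is {8,+,3}.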
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrency with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least an undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExprs.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec = false;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace
/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}
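
// For example, starting from Terms = {%m * %o, %o} (sorted so larger products
// come first), the last term %o divides every term exactly: %m * %o becomes
// %m, and %o becomes the constant 1, which is erased. The recursion then
// emits %m, and %o is pushed on return, yielding Sizes = {%m, %o}.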
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
      return true;

  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}

/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}

void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non-parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size; keep the quotient when it is
  // non-zero, otherwise proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting and removing constant factors:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
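  // E.g. for normalized terms {(%m * %o), %o} with ElementSize 8 (the
  // double A[n][m][o] example documented at delinearize below), the recursion
  // yields Sizes = {%m, %o}; appending the element size gives {%m, %o, 8}.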
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}

void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}

/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access; the remainder of the delinearization is the
/// offset at which the array access starts. The SCEV->delinearize algorithm
/// computes the multiples of SCEV coefficients: that is, it pattern matches
/// subexpressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identify the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of
/// that dimension by dividing the overall size of the array by the size of
/// the known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that corresponds to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases check the output of a function pass, DelinearizationPass,
/// which walks through all loads and stores of a function asking for the SCEV
/// of the memory access with respect to all enclosing loops, calling
/// SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}

bool ScalarEvolution::getIndexExpressionsFromGEP(
    const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<int> &Sizes) {
  assert(Subscripts.empty() && Sizes.empty() &&
         "Expected output lists to be empty on entry to this function.");
  assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
  Type *Ty = GEP->getPointerOperandType();
  bool DroppedFirstDim = false;
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    const SCEV *Expr = getSCEV(GEP->getOperand(i));
    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        return false;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      return false;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }
  return !Subscripts.empty();
}

//===----------------------------------------------------------------------===//
//
SCEVCallbackVH Class Implementation 12094 //===----------------------------------------------------------------------===// 12095 12096 void ScalarEvolution::SCEVCallbackVH::deleted() { 12097 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12098 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 12099 SE->ConstantEvolutionLoopExitValue.erase(PN); 12100 SE->eraseValueFromMap(getValPtr()); 12101 // this now dangles! 12102 } 12103 12104 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 12105 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12106 12107 // Forget all the expressions associated with users of the old value, 12108 // so that future queries will recompute the expressions using the new 12109 // value. 12110 Value *Old = getValPtr(); 12111 SmallVector<User *, 16> Worklist(Old->users()); 12112 SmallPtrSet<User *, 8> Visited; 12113 while (!Worklist.empty()) { 12114 User *U = Worklist.pop_back_val(); 12115 // Deleting the Old value will cause this to dangle. Postpone 12116 // that until everything else is done. 12117 if (U == Old) 12118 continue; 12119 if (!Visited.insert(U).second) 12120 continue; 12121 if (PHINode *PN = dyn_cast<PHINode>(U)) 12122 SE->ConstantEvolutionLoopExitValue.erase(PN); 12123 SE->eraseValueFromMap(U); 12124 llvm::append_range(Worklist, U->users()); 12125 } 12126 // Delete the Old value. 12127 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12128 SE->ConstantEvolutionLoopExitValue.erase(PN); 12129 SE->eraseValueFromMap(Old); 12130 // this now dangles! 12131 } 12132 12133 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12134 : CallbackVH(V), SE(se) {} 12135 12136 //===----------------------------------------------------------------------===// 12137 // ScalarEvolution Class Implementation 12138 //===----------------------------------------------------------------------===// 12139 12140 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12141 AssumptionCache &AC, DominatorTree &DT, 12142 LoopInfo &LI) 12143 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12144 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12145 LoopDispositions(64), BlockDispositions(64) { 12146 // To use guards for proving predicates, we need to scan every instruction in 12147 // relevant basic blocks, and not just terminators. Doing this is a waste of 12148 // time if the IR does not actually contain any calls to 12149 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12150 // 12151 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12152 // to _add_ guards to the module when there weren't any before, and wants 12153 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12154 // efficient in lieu of being smart in that rather obscure case. 
12155 12156 auto *GuardDecl = F.getParent()->getFunction( 12157 Intrinsic::getName(Intrinsic::experimental_guard)); 12158 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12159 } 12160 12161 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12162 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12163 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12164 ValueExprMap(std::move(Arg.ValueExprMap)), 12165 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12166 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12167 PendingMerges(std::move(Arg.PendingMerges)), 12168 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12169 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12170 PredicatedBackedgeTakenCounts( 12171 std::move(Arg.PredicatedBackedgeTakenCounts)), 12172 ConstantEvolutionLoopExitValue( 12173 std::move(Arg.ConstantEvolutionLoopExitValue)), 12174 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12175 LoopDispositions(std::move(Arg.LoopDispositions)), 12176 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12177 BlockDispositions(std::move(Arg.BlockDispositions)), 12178 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12179 SignedRanges(std::move(Arg.SignedRanges)), 12180 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12181 UniquePreds(std::move(Arg.UniquePreds)), 12182 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12183 LoopUsers(std::move(Arg.LoopUsers)), 12184 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12185 FirstUnknown(Arg.FirstUnknown) { 12186 Arg.FirstUnknown = nullptr; 12187 } 12188 12189 ScalarEvolution::~ScalarEvolution() { 12190 // Iterate through all the SCEVUnknown instances and call their 12191 // destructors, so that they release their references to their values. 12192 for (SCEVUnknown *U = FirstUnknown; U;) { 12193 SCEVUnknown *Tmp = U; 12194 U = U->Next; 12195 Tmp->~SCEVUnknown(); 12196 } 12197 FirstUnknown = nullptr; 12198 12199 ExprValueMap.clear(); 12200 ValueExprMap.clear(); 12201 HasRecMap.clear(); 12202 12203 // Free any extra memory created for ExitNotTakenInfo in the unlikely event 12204 // that a loop had multiple computable exits. 
12205 for (auto &BTCI : BackedgeTakenCounts) 12206 BTCI.second.clear(); 12207 for (auto &BTCI : PredicatedBackedgeTakenCounts) 12208 BTCI.second.clear(); 12209 12210 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12211 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12212 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12213 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12214 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12215 } 12216 12217 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12218 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12219 } 12220 12221 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12222 const Loop *L) { 12223 // Print all inner loops first 12224 for (Loop *I : *L) 12225 PrintLoopInfo(OS, SE, I); 12226 12227 OS << "Loop "; 12228 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12229 OS << ": "; 12230 12231 SmallVector<BasicBlock *, 8> ExitingBlocks; 12232 L->getExitingBlocks(ExitingBlocks); 12233 if (ExitingBlocks.size() != 1) 12234 OS << "<multiple exits> "; 12235 12236 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12237 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12238 else 12239 OS << "Unpredictable backedge-taken count.\n"; 12240 12241 if (ExitingBlocks.size() > 1) 12242 for (BasicBlock *ExitingBlock : ExitingBlocks) { 12243 OS << " exit count for " << ExitingBlock->getName() << ": " 12244 << *SE->getExitCount(L, ExitingBlock) << "\n"; 12245 } 12246 12247 OS << "Loop "; 12248 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12249 OS << ": "; 12250 12251 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 12252 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 12253 if (SE->isBackedgeTakenCountMaxOrZero(L)) 12254 OS << ", actual taken count either this or zero."; 12255 } else { 12256 OS << "Unpredictable max backedge-taken count. "; 12257 } 12258 12259 OS << "\n" 12260 "Loop "; 12261 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12262 OS << ": "; 12263 12264 SCEVUnionPredicate Pred; 12265 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred); 12266 if (!isa<SCEVCouldNotCompute>(PBT)) { 12267 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 12268 OS << " Predicates:\n"; 12269 Pred.print(OS, 4); 12270 } else { 12271 OS << "Unpredictable predicated backedge-taken count. "; 12272 } 12273 OS << "\n"; 12274 12275 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 12276 OS << "Loop "; 12277 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12278 OS << ": "; 12279 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 12280 } 12281 } 12282 12283 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 12284 switch (LD) { 12285 case ScalarEvolution::LoopVariant: 12286 return "Variant"; 12287 case ScalarEvolution::LoopInvariant: 12288 return "Invariant"; 12289 case ScalarEvolution::LoopComputable: 12290 return "Computable"; 12291 } 12292 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 12293 } 12294 12295 void ScalarEvolution::print(raw_ostream &OS) const { 12296 // ScalarEvolution's implementation of the print method is to print 12297 // out SCEV values of all instructions that are interesting. Doing 12298 // this potentially causes it to create new SCEV objects though, 12299 // which technically conflicts with the const qualifier. 
This isn't 12300 // observable from outside the class though, so casting away the 12301 // const isn't dangerous. 12302 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12303 12304 if (ClassifyExpressions) { 12305 OS << "Classifying expressions for: "; 12306 F.printAsOperand(OS, /*PrintType=*/false); 12307 OS << "\n"; 12308 for (Instruction &I : instructions(F)) 12309 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 12310 OS << I << '\n'; 12311 OS << " --> "; 12312 const SCEV *SV = SE.getSCEV(&I); 12313 SV->print(OS); 12314 if (!isa<SCEVCouldNotCompute>(SV)) { 12315 OS << " U: "; 12316 SE.getUnsignedRange(SV).print(OS); 12317 OS << " S: "; 12318 SE.getSignedRange(SV).print(OS); 12319 } 12320 12321 const Loop *L = LI.getLoopFor(I.getParent()); 12322 12323 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 12324 if (AtUse != SV) { 12325 OS << " --> "; 12326 AtUse->print(OS); 12327 if (!isa<SCEVCouldNotCompute>(AtUse)) { 12328 OS << " U: "; 12329 SE.getUnsignedRange(AtUse).print(OS); 12330 OS << " S: "; 12331 SE.getSignedRange(AtUse).print(OS); 12332 } 12333 } 12334 12335 if (L) { 12336 OS << "\t\t" "Exits: "; 12337 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 12338 if (!SE.isLoopInvariant(ExitValue, L)) { 12339 OS << "<<Unknown>>"; 12340 } else { 12341 OS << *ExitValue; 12342 } 12343 12344 bool First = true; 12345 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 12346 if (First) { 12347 OS << "\t\t" "LoopDispositions: { "; 12348 First = false; 12349 } else { 12350 OS << ", "; 12351 } 12352 12353 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12354 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 12355 } 12356 12357 for (auto *InnerL : depth_first(L)) { 12358 if (InnerL == L) 12359 continue; 12360 if (First) { 12361 OS << "\t\t" "LoopDispositions: { "; 12362 First = false; 12363 } else { 12364 OS << ", "; 12365 } 12366 12367 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12368 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 12369 } 12370 12371 OS << " }"; 12372 } 12373 12374 OS << "\n"; 12375 } 12376 } 12377 12378 OS << "Determining loop execution counts for: "; 12379 F.printAsOperand(OS, /*PrintType=*/false); 12380 OS << "\n"; 12381 for (Loop *I : LI) 12382 PrintLoopInfo(OS, &SE, I); 12383 } 12384 12385 ScalarEvolution::LoopDisposition 12386 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 12387 auto &Values = LoopDispositions[S]; 12388 for (auto &V : Values) { 12389 if (V.getPointer() == L) 12390 return V.getInt(); 12391 } 12392 Values.emplace_back(L, LoopVariant); 12393 LoopDisposition D = computeLoopDisposition(S, L); 12394 auto &Values2 = LoopDispositions[S]; 12395 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12396 if (V.getPointer() == L) { 12397 V.setInt(D); 12398 break; 12399 } 12400 } 12401 return D; 12402 } 12403 12404 ScalarEvolution::LoopDisposition 12405 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 12406 switch (S->getSCEVType()) { 12407 case scConstant: 12408 return LoopInvariant; 12409 case scPtrToInt: 12410 case scTruncate: 12411 case scZeroExtend: 12412 case scSignExtend: 12413 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 12414 case scAddRecExpr: { 12415 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12416 12417 // If L is the addrec's loop, it's computable. 
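    // (E.g. {0,+,1}<%L> changes in a known, computable way on every
    // iteration of %L.)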
12418 if (AR->getLoop() == L) 12419 return LoopComputable; 12420 12421 // Add recurrences are never invariant in the function-body (null loop). 12422 if (!L) 12423 return LoopVariant; 12424 12425 // Everything that is not defined at loop entry is variant. 12426 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 12427 return LoopVariant; 12428 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 12429 " dominate the contained loop's header?"); 12430 12431 // This recurrence is invariant w.r.t. L if AR's loop contains L. 12432 if (AR->getLoop()->contains(L)) 12433 return LoopInvariant; 12434 12435 // This recurrence is variant w.r.t. L if any of its operands 12436 // are variant. 12437 for (auto *Op : AR->operands()) 12438 if (!isLoopInvariant(Op, L)) 12439 return LoopVariant; 12440 12441 // Otherwise it's loop-invariant. 12442 return LoopInvariant; 12443 } 12444 case scAddExpr: 12445 case scMulExpr: 12446 case scUMaxExpr: 12447 case scSMaxExpr: 12448 case scUMinExpr: 12449 case scSMinExpr: { 12450 bool HasVarying = false; 12451 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 12452 LoopDisposition D = getLoopDisposition(Op, L); 12453 if (D == LoopVariant) 12454 return LoopVariant; 12455 if (D == LoopComputable) 12456 HasVarying = true; 12457 } 12458 return HasVarying ? LoopComputable : LoopInvariant; 12459 } 12460 case scUDivExpr: { 12461 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12462 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 12463 if (LD == LoopVariant) 12464 return LoopVariant; 12465 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 12466 if (RD == LoopVariant) 12467 return LoopVariant; 12468 return (LD == LoopInvariant && RD == LoopInvariant) ? 12469 LoopInvariant : LoopComputable; 12470 } 12471 case scUnknown: 12472 // All non-instruction values are loop invariant. All instructions are loop 12473 // invariant if they are not contained in the specified loop. 12474 // Instructions are never considered invariant in the function body 12475 // (null loop) because they are defined within the "loop". 12476 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 12477 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 12478 return LoopInvariant; 12479 case scCouldNotCompute: 12480 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12481 } 12482 llvm_unreachable("Unknown SCEV kind!"); 12483 } 12484 12485 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 12486 return getLoopDisposition(S, L) == LoopInvariant; 12487 } 12488 12489 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 12490 return getLoopDisposition(S, L) == LoopComputable; 12491 } 12492 12493 ScalarEvolution::BlockDisposition 12494 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12495 auto &Values = BlockDispositions[S]; 12496 for (auto &V : Values) { 12497 if (V.getPointer() == BB) 12498 return V.getInt(); 12499 } 12500 Values.emplace_back(BB, DoesNotDominateBlock); 12501 BlockDisposition D = computeBlockDisposition(S, BB); 12502 auto &Values2 = BlockDispositions[S]; 12503 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) { 12504 if (V.getPointer() == BB) { 12505 V.setInt(D); 12506 break; 12507 } 12508 } 12509 return D; 12510 } 12511 12512 ScalarEvolution::BlockDisposition 12513 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 12514 switch (S->getSCEVType()) { 12515 case scConstant: 12516 return ProperlyDominatesBlock; 12517 case scPtrToInt: 12518 case scTruncate: 12519 case scZeroExtend: 12520 case scSignExtend: 12521 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 12522 case scAddRecExpr: { 12523 // This uses a "dominates" query instead of "properly dominates" query 12524 // to test for proper dominance too, because the instruction which 12525 // produces the addrec's value is a PHI, and a PHI effectively properly 12526 // dominates its entire containing block. 12527 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 12528 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 12529 return DoesNotDominateBlock; 12530 12531 // Fall through into SCEVNAryExpr handling. 12532 LLVM_FALLTHROUGH; 12533 } 12534 case scAddExpr: 12535 case scMulExpr: 12536 case scUMaxExpr: 12537 case scSMaxExpr: 12538 case scUMinExpr: 12539 case scSMinExpr: { 12540 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 12541 bool Proper = true; 12542 for (const SCEV *NAryOp : NAry->operands()) { 12543 BlockDisposition D = getBlockDisposition(NAryOp, BB); 12544 if (D == DoesNotDominateBlock) 12545 return DoesNotDominateBlock; 12546 if (D == DominatesBlock) 12547 Proper = false; 12548 } 12549 return Proper ? ProperlyDominatesBlock : DominatesBlock; 12550 } 12551 case scUDivExpr: { 12552 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 12553 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 12554 BlockDisposition LD = getBlockDisposition(LHS, BB); 12555 if (LD == DoesNotDominateBlock) 12556 return DoesNotDominateBlock; 12557 BlockDisposition RD = getBlockDisposition(RHS, BB); 12558 if (RD == DoesNotDominateBlock) 12559 return DoesNotDominateBlock; 12560 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
12561 ProperlyDominatesBlock : DominatesBlock; 12562 } 12563 case scUnknown: 12564 if (Instruction *I = 12565 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) { 12566 if (I->getParent() == BB) 12567 return DominatesBlock; 12568 if (DT.properlyDominates(I->getParent(), BB)) 12569 return ProperlyDominatesBlock; 12570 return DoesNotDominateBlock; 12571 } 12572 return ProperlyDominatesBlock; 12573 case scCouldNotCompute: 12574 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 12575 } 12576 llvm_unreachable("Unknown SCEV kind!"); 12577 } 12578 12579 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) { 12580 return getBlockDisposition(S, BB) >= DominatesBlock; 12581 } 12582 12583 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) { 12584 return getBlockDisposition(S, BB) == ProperlyDominatesBlock; 12585 } 12586 12587 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const { 12588 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; }); 12589 } 12590 12591 bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const { 12592 auto IsS = [&](const SCEV *X) { return S == X; }; 12593 auto ContainsS = [&](const SCEV *X) { 12594 return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS); 12595 }; 12596 return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken); 12597 } 12598 12599 void 12600 ScalarEvolution::forgetMemoizedResults(const SCEV *S) { 12601 ValuesAtScopes.erase(S); 12602 LoopDispositions.erase(S); 12603 BlockDispositions.erase(S); 12604 UnsignedRanges.erase(S); 12605 SignedRanges.erase(S); 12606 ExprValueMap.erase(S); 12607 HasRecMap.erase(S); 12608 MinTrailingZerosCache.erase(S); 12609 12610 for (auto I = PredicatedSCEVRewrites.begin(); 12611 I != PredicatedSCEVRewrites.end();) { 12612 std::pair<const SCEV *, const Loop *> Entry = I->first; 12613 if (Entry.first == S) 12614 PredicatedSCEVRewrites.erase(I++); 12615 else 12616 ++I; 12617 } 12618 12619 auto RemoveSCEVFromBackedgeMap = 12620 [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) { 12621 for (auto I = Map.begin(), E = Map.end(); I != E;) { 12622 BackedgeTakenInfo &BEInfo = I->second; 12623 if (BEInfo.hasOperand(S, this)) { 12624 BEInfo.clear(); 12625 Map.erase(I++); 12626 } else 12627 ++I; 12628 } 12629 }; 12630 12631 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts); 12632 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts); 12633 } 12634 12635 void 12636 ScalarEvolution::getUsedLoops(const SCEV *S, 12637 SmallPtrSetImpl<const Loop *> &LoopsUsed) { 12638 struct FindUsedLoops { 12639 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed) 12640 : LoopsUsed(LoopsUsed) {} 12641 SmallPtrSetImpl<const Loop *> &LoopsUsed; 12642 bool follow(const SCEV *S) { 12643 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S)) 12644 LoopsUsed.insert(AR->getLoop()); 12645 return true; 12646 } 12647 12648 bool isDone() const { return false; } 12649 }; 12650 12651 FindUsedLoops F(LoopsUsed); 12652 SCEVTraversal<FindUsedLoops>(F).visitAll(S); 12653 } 12654 12655 void ScalarEvolution::addToLoopUseLists(const SCEV *S) { 12656 SmallPtrSet<const Loop *, 8> LoopsUsed; 12657 getUsedLoops(S, LoopsUsed); 12658 for (auto *L : LoopsUsed) 12659 LoopUsers[L].push_back(S); 12660 } 12661 12662 void ScalarEvolution::verify() const { 12663 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 12664 ScalarEvolution SE2(F, TLI, AC, DT, LI); 12665 12666 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end()); 12667 12668 // Map's SCEV expressions from one 
ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.contains(L))
      continue;
    ValidLoops.insert(L);
    Worklist.append(L->begin(), L->end());
  }
  // Check for SCEV expressions referencing invalid/deleted loops.
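  // An AddRec whose loop is no longer in LoopInfo usually means a transform
  // deleted the loop without notifying SCEV via forgetLoop().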
12745 for (auto &KV : ValueExprMap) { 12746 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second); 12747 if (!AR) 12748 continue; 12749 assert(ValidLoops.contains(AR->getLoop()) && 12750 "AddRec references invalid loop"); 12751 } 12752 } 12753 12754 bool ScalarEvolution::invalidate( 12755 Function &F, const PreservedAnalyses &PA, 12756 FunctionAnalysisManager::Invalidator &Inv) { 12757 // Invalidate the ScalarEvolution object whenever it isn't preserved or one 12758 // of its dependencies is invalidated. 12759 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>(); 12760 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 12761 Inv.invalidate<AssumptionAnalysis>(F, PA) || 12762 Inv.invalidate<DominatorTreeAnalysis>(F, PA) || 12763 Inv.invalidate<LoopAnalysis>(F, PA); 12764 } 12765 12766 AnalysisKey ScalarEvolutionAnalysis::Key; 12767 12768 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, 12769 FunctionAnalysisManager &AM) { 12770 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F), 12771 AM.getResult<AssumptionAnalysis>(F), 12772 AM.getResult<DominatorTreeAnalysis>(F), 12773 AM.getResult<LoopAnalysis>(F)); 12774 } 12775 12776 PreservedAnalyses 12777 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) { 12778 AM.getResult<ScalarEvolutionAnalysis>(F).verify(); 12779 return PreservedAnalyses::all(); 12780 } 12781 12782 PreservedAnalyses 12783 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) { 12784 // For compatibility with opt's -analyze feature under legacy pass manager 12785 // which was not ported to NPM. This keeps tests using 12786 // update_analyze_test_checks.py working. 12787 OS << "Printing analysis 'Scalar Evolution Analysis' for function '" 12788 << F.getName() << "':\n"; 12789 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS); 12790 return PreservedAnalyses::all(); 12791 } 12792 12793 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", 12794 "Scalar Evolution Analysis", false, true) 12795 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 12796 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 12797 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 12798 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 12799 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", 12800 "Scalar Evolution Analysis", false, true) 12801 12802 char ScalarEvolutionWrapperPass::ID = 0; 12803 12804 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { 12805 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); 12806 } 12807 12808 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { 12809 SE.reset(new ScalarEvolution( 12810 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 12811 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 12812 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 12813 getAnalysis<LoopInfoWrapperPass>().getLoopInfo())); 12814 return false; 12815 } 12816 12817 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } 12818 12819 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { 12820 SE->print(OS); 12821 } 12822 12823 void ScalarEvolutionWrapperPass::verifyAnalysis() const { 12824 if (!VerifySCEV) 12825 return; 12826 12827 SE->verify(); 12828 } 12829 12830 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 12831 AU.setPreservesAll(); 12832 AU.addRequiredTransitive<AssumptionCacheTracker>(); 12833 
AU.addRequiredTransitive<LoopInfoWrapperPass>(); 12834 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 12835 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 12836 } 12837 12838 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS, 12839 const SCEV *RHS) { 12840 FoldingSetNodeID ID; 12841 assert(LHS->getType() == RHS->getType() && 12842 "Type mismatch between LHS and RHS"); 12843 // Unique this node based on the arguments 12844 ID.AddInteger(SCEVPredicate::P_Equal); 12845 ID.AddPointer(LHS); 12846 ID.AddPointer(RHS); 12847 void *IP = nullptr; 12848 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12849 return S; 12850 SCEVEqualPredicate *Eq = new (SCEVAllocator) 12851 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS); 12852 UniquePreds.InsertNode(Eq, IP); 12853 return Eq; 12854 } 12855 12856 const SCEVPredicate *ScalarEvolution::getWrapPredicate( 12857 const SCEVAddRecExpr *AR, 12858 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12859 FoldingSetNodeID ID; 12860 // Unique this node based on the arguments 12861 ID.AddInteger(SCEVPredicate::P_Wrap); 12862 ID.AddPointer(AR); 12863 ID.AddInteger(AddedFlags); 12864 void *IP = nullptr; 12865 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP)) 12866 return S; 12867 auto *OF = new (SCEVAllocator) 12868 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags); 12869 UniquePreds.InsertNode(OF, IP); 12870 return OF; 12871 } 12872 12873 namespace { 12874 12875 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> { 12876 public: 12877 12878 /// Rewrites \p S in the context of a loop L and the SCEV predication 12879 /// infrastructure. 12880 /// 12881 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the 12882 /// equivalences present in \p Pred. 12883 /// 12884 /// If \p NewPreds is non-null, rewrite is free to add further predicates to 12885 /// \p NewPreds such that the result will be an AddRecExpr. 12886 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE, 12887 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12888 SCEVUnionPredicate *Pred) { 12889 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred); 12890 return Rewriter.visit(S); 12891 } 12892 12893 const SCEV *visitUnknown(const SCEVUnknown *Expr) { 12894 if (Pred) { 12895 auto ExprPreds = Pred->getPredicatesForExpr(Expr); 12896 for (auto *Pred : ExprPreds) 12897 if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred)) 12898 if (IPred->getLHS() == Expr) 12899 return IPred->getRHS(); 12900 } 12901 return convertToAddRecWithPreds(Expr); 12902 } 12903 12904 const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { 12905 const SCEV *Operand = visit(Expr->getOperand()); 12906 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12907 if (AR && AR->getLoop() == L && AR->isAffine()) { 12908 // This couldn't be folded because the operand didn't have the nuw 12909 // flag. Add the nusw flag as an assumption that we could make. 
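      // I.e. rewrite zext({a,+,s}<%L>) as {zext(a),+,sext(s)}<%L>, which is
      // valid under a runtime-checked <nusw> predicate on the recurrence.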
12910 const SCEV *Step = AR->getStepRecurrence(SE); 12911 Type *Ty = Expr->getType(); 12912 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW)) 12913 return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty), 12914 SE.getSignExtendExpr(Step, Ty), L, 12915 AR->getNoWrapFlags()); 12916 } 12917 return SE.getZeroExtendExpr(Operand, Expr->getType()); 12918 } 12919 12920 const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { 12921 const SCEV *Operand = visit(Expr->getOperand()); 12922 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand); 12923 if (AR && AR->getLoop() == L && AR->isAffine()) { 12924 // This couldn't be folded because the operand didn't have the nsw 12925 // flag. Add the nssw flag as an assumption that we could make. 12926 const SCEV *Step = AR->getStepRecurrence(SE); 12927 Type *Ty = Expr->getType(); 12928 if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW)) 12929 return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty), 12930 SE.getSignExtendExpr(Step, Ty), L, 12931 AR->getNoWrapFlags()); 12932 } 12933 return SE.getSignExtendExpr(Operand, Expr->getType()); 12934 } 12935 12936 private: 12937 explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE, 12938 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds, 12939 SCEVUnionPredicate *Pred) 12940 : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {} 12941 12942 bool addOverflowAssumption(const SCEVPredicate *P) { 12943 if (!NewPreds) { 12944 // Check if we've already made this assumption. 12945 return Pred && Pred->implies(P); 12946 } 12947 NewPreds->insert(P); 12948 return true; 12949 } 12950 12951 bool addOverflowAssumption(const SCEVAddRecExpr *AR, 12952 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) { 12953 auto *A = SE.getWrapPredicate(AR, AddedFlags); 12954 return addOverflowAssumption(A); 12955 } 12956 12957 // If \p Expr represents a PHINode, we try to see if it can be represented 12958 // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible 12959 // to add this predicate as a runtime overflow check, we return the AddRec. 12960 // If \p Expr does not meet these conditions (is not a PHI node, or we 12961 // couldn't create an AddRec for it, or couldn't add the predicate), we just 12962 // return \p Expr. 12963 const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) { 12964 if (!isa<PHINode>(Expr->getValue())) 12965 return Expr; 12966 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>> 12967 PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr); 12968 if (!PredicatedRewrite) 12969 return Expr; 12970 for (auto *P : PredicatedRewrite->second){ 12971 // Wrap predicates from outer loops are not supported. 
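      // (If the predicate's AddRec belongs to a loop other than L, give up
      // and return the original expression.)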
12972 if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) { 12973 auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr()); 12974 if (L != AR->getLoop()) 12975 return Expr; 12976 } 12977 if (!addOverflowAssumption(P)) 12978 return Expr; 12979 } 12980 return PredicatedRewrite->first; 12981 } 12982 12983 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds; 12984 SCEVUnionPredicate *Pred; 12985 const Loop *L; 12986 }; 12987 12988 } // end anonymous namespace 12989 12990 const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L, 12991 SCEVUnionPredicate &Preds) { 12992 return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds); 12993 } 12994 12995 const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates( 12996 const SCEV *S, const Loop *L, 12997 SmallPtrSetImpl<const SCEVPredicate *> &Preds) { 12998 SmallPtrSet<const SCEVPredicate *, 4> TransformPreds; 12999 S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr); 13000 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S); 13001 13002 if (!AddRec) 13003 return nullptr; 13004 13005 // Since the transformation was successful, we can now transfer the SCEV 13006 // predicates. 13007 for (auto *P : TransformPreds) 13008 Preds.insert(P); 13009 13010 return AddRec; 13011 } 13012 13013 /// SCEV predicates 13014 SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID, 13015 SCEVPredicateKind Kind) 13016 : FastID(ID), Kind(Kind) {} 13017 13018 SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID, 13019 const SCEV *LHS, const SCEV *RHS) 13020 : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) { 13021 assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match"); 13022 assert(LHS != RHS && "LHS and RHS are the same SCEV"); 13023 } 13024 13025 bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const { 13026 const auto *Op = dyn_cast<SCEVEqualPredicate>(N); 13027 13028 if (!Op) 13029 return false; 13030 13031 return Op->LHS == LHS && Op->RHS == RHS; 13032 } 13033 13034 bool SCEVEqualPredicate::isAlwaysTrue() const { return false; } 13035 13036 const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; } 13037 13038 void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const { 13039 OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n"; 13040 } 13041 13042 SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID, 13043 const SCEVAddRecExpr *AR, 13044 IncrementWrapFlags Flags) 13045 : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {} 13046 13047 const SCEV *SCEVWrapPredicate::getExpr() const { return AR; } 13048 13049 bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const { 13050 const auto *Op = dyn_cast<SCEVWrapPredicate>(N); 13051 13052 return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags; 13053 } 13054 13055 bool SCEVWrapPredicate::isAlwaysTrue() const { 13056 SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags(); 13057 IncrementWrapFlags IFlags = Flags; 13058 13059 if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags) 13060 IFlags = clearFlags(IFlags, IncrementNSSW); 13061 13062 return IFlags == IncrementAnyWrap; 13063 } 13064 13065 void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const { 13066 OS.indent(Depth) << *getExpr() << " Added Flags: "; 13067 if (SCEVWrapPredicate::IncrementNUSW & getFlags()) 13068 OS << "<nusw>"; 13069 if (SCEVWrapPredicate::IncrementNSSW & getFlags()) 13070 OS << "<nssw>"; 13071 OS << "\n"; 13072 } 13073 13074 SCEVWrapPredicate::IncrementWrapFlags 13075 
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR, 13076 ScalarEvolution &SE) { 13077 IncrementWrapFlags ImpliedFlags = IncrementAnyWrap; 13078 SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags(); 13079 13080 // We can safely transfer the NSW flag as NSSW. 13081 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags) 13082 ImpliedFlags = IncrementNSSW; 13083 13084 if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) { 13085 // If the increment is positive, the SCEV NUW flag will also imply the 13086 // WrapPredicate NUSW flag. 13087 if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) 13088 if (Step->getValue()->getValue().isNonNegative()) 13089 ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW); 13090 } 13091 13092 return ImpliedFlags; 13093 } 13094 13095 /// Union predicates don't get cached so create a dummy set ID for it. 13096 SCEVUnionPredicate::SCEVUnionPredicate() 13097 : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {} 13098 13099 bool SCEVUnionPredicate::isAlwaysTrue() const { 13100 return all_of(Preds, 13101 [](const SCEVPredicate *I) { return I->isAlwaysTrue(); }); 13102 } 13103 13104 ArrayRef<const SCEVPredicate *> 13105 SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) { 13106 auto I = SCEVToPreds.find(Expr); 13107 if (I == SCEVToPreds.end()) 13108 return ArrayRef<const SCEVPredicate *>(); 13109 return I->second; 13110 } 13111 13112 bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const { 13113 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) 13114 return all_of(Set->Preds, 13115 [this](const SCEVPredicate *I) { return this->implies(I); }); 13116 13117 auto ScevPredsIt = SCEVToPreds.find(N->getExpr()); 13118 if (ScevPredsIt == SCEVToPreds.end()) 13119 return false; 13120 auto &SCEVPreds = ScevPredsIt->second; 13121 13122 return any_of(SCEVPreds, 13123 [N](const SCEVPredicate *I) { return I->implies(N); }); 13124 } 13125 13126 const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; } 13127 13128 void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const { 13129 for (auto Pred : Preds) 13130 Pred->print(OS, Depth); 13131 } 13132 13133 void SCEVUnionPredicate::add(const SCEVPredicate *N) { 13134 if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) { 13135 for (auto Pred : Set->Preds) 13136 add(Pred); 13137 return; 13138 } 13139 13140 if (implies(N)) 13141 return; 13142 13143 const SCEV *Key = N->getExpr(); 13144 assert(Key && "Only SCEVUnionPredicate doesn't have an " 13145 " associated expression!"); 13146 13147 SCEVToPreds[Key].push_back(N); 13148 Preds.push_back(N); 13149 } 13150 13151 PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE, 13152 Loop &L) 13153 : SE(SE), L(L) {} 13154 13155 const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) { 13156 const SCEV *Expr = SE.getSCEV(V); 13157 RewriteEntry &Entry = RewriteMap[Expr]; 13158 13159 // If we already have an entry and the version matches, return it. 13160 if (Entry.second && Generation == Entry.first) 13161 return Entry.second; 13162 13163 // We found an entry but it's stale. Rewrite the stale entry 13164 // according to the current predicate. 
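  // (The previous rewrite is used as the starting point, so predicates added
  // since that generation are applied incrementally.)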
13165 if (Entry.second) 13166 Expr = Entry.second; 13167 13168 const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds); 13169 Entry = {Generation, NewSCEV}; 13170 13171 return NewSCEV; 13172 } 13173 13174 const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() { 13175 if (!BackedgeCount) { 13176 SCEVUnionPredicate BackedgePred; 13177 BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred); 13178 addPredicate(BackedgePred); 13179 } 13180 return BackedgeCount; 13181 } 13182 13183 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) { 13184 if (Preds.implies(&Pred)) 13185 return; 13186 Preds.add(&Pred); 13187 updateGeneration(); 13188 } 13189 13190 const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const { 13191 return Preds; 13192 } 13193 13194 void PredicatedScalarEvolution::updateGeneration() { 13195 // If the generation number wrapped recompute everything. 13196 if (++Generation == 0) { 13197 for (auto &II : RewriteMap) { 13198 const SCEV *Rewritten = II.second.second; 13199 II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)}; 13200 } 13201 } 13202 } 13203 13204 void PredicatedScalarEvolution::setNoOverflow( 13205 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13206 const SCEV *Expr = getSCEV(V); 13207 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13208 13209 auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE); 13210 13211 // Clear the statically implied flags. 13212 Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags); 13213 addPredicate(*SE.getWrapPredicate(AR, Flags)); 13214 13215 auto II = FlagsMap.insert({V, Flags}); 13216 if (!II.second) 13217 II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second); 13218 } 13219 13220 bool PredicatedScalarEvolution::hasNoOverflow( 13221 Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) { 13222 const SCEV *Expr = getSCEV(V); 13223 const auto *AR = cast<SCEVAddRecExpr>(Expr); 13224 13225 Flags = SCEVWrapPredicate::clearFlags( 13226 Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE)); 13227 13228 auto II = FlagsMap.find(V); 13229 13230 if (II != FlagsMap.end()) 13231 Flags = SCEVWrapPredicate::clearFlags(Flags, II->second); 13232 13233 return Flags == SCEVWrapPredicate::IncrementAnyWrap; 13234 } 13235 13236 const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) { 13237 const SCEV *Expr = this->getSCEV(V); 13238 SmallPtrSet<const SCEVPredicate *, 4> NewPreds; 13239 auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds); 13240 13241 if (!New) 13242 return nullptr; 13243 13244 for (auto *P : NewPreds) 13245 Preds.add(P); 13246 13247 updateGeneration(); 13248 RewriteMap[SE.getSCEV(V)] = {Generation, New}; 13249 return New; 13250 } 13251 13252 PredicatedScalarEvolution::PredicatedScalarEvolution( 13253 const PredicatedScalarEvolution &Init) 13254 : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds), 13255 Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) { 13256 for (auto I : Init.FlagsMap) 13257 FlagsMap.insert(I); 13258 } 13259 13260 void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const { 13261 // For each block. 13262 for (auto *BB : L.getBlocks()) 13263 for (auto &I : *BB) { 13264 if (!SE.isSCEVable(I.getType())) 13265 continue; 13266 13267 auto *Expr = SE.getSCEV(&I); 13268 auto II = RewriteMap.find(Expr); 13269 13270 if (II == RewriteMap.end()) 13271 continue; 13272 13273 // Don't print things that are not interesting. 
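      // (I.e. expressions that the predicates left unchanged.)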
13274 if (II->second.second == Expr) 13275 continue; 13276 13277 OS.indent(Depth) << "[PSE]" << I << ":\n"; 13278 OS.indent(Depth + 2) << *Expr << "\n"; 13279 OS.indent(Depth + 2) << "--> " << *II->second.second << "\n"; 13280 } 13281 } 13282 13283 // Match the mathematical pattern A - (A / B) * B, where A and B can be 13284 // arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used 13285 // for URem with constant power-of-2 second operands. 13286 // It's not always easy, as A and B can be folded (imagine A is X / 2, and B is 13287 // 4, A / B becomes X / 8). 13288 bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS, 13289 const SCEV *&RHS) { 13290 // Try to match 'zext (trunc A to iB) to iY', which is used 13291 // for URem with constant power-of-2 second operands. Make sure the size of 13292 // the operand A matches the size of the whole expressions. 13293 if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr)) 13294 if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) { 13295 LHS = Trunc->getOperand(); 13296 // Bail out if the type of the LHS is larger than the type of the 13297 // expression for now. 13298 if (getTypeSizeInBits(LHS->getType()) > 13299 getTypeSizeInBits(Expr->getType())) 13300 return false; 13301 if (LHS->getType() != Expr->getType()) 13302 LHS = getZeroExtendExpr(LHS, Expr->getType()); 13303 RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1) 13304 << getTypeSizeInBits(Trunc->getType())); 13305 return true; 13306 } 13307 const auto *Add = dyn_cast<SCEVAddExpr>(Expr); 13308 if (Add == nullptr || Add->getNumOperands() != 2) 13309 return false; 13310 13311 const SCEV *A = Add->getOperand(1); 13312 const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0)); 13313 13314 if (Mul == nullptr) 13315 return false; 13316 13317 const auto MatchURemWithDivisor = [&](const SCEV *B) { 13318 // (SomeExpr + (-(SomeExpr / B) * B)). 13319 if (Expr == getURemExpr(A, B)) { 13320 LHS = A; 13321 RHS = B; 13322 return true; 13323 } 13324 return false; 13325 }; 13326 13327 // (SomeExpr + (-1 * (SomeExpr / B) * B)). 13328 if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0))) 13329 return MatchURemWithDivisor(Mul->getOperand(1)) || 13330 MatchURemWithDivisor(Mul->getOperand(2)); 13331 13332 // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)). 13333 if (Mul->getNumOperands() == 2) 13334 return MatchURemWithDivisor(Mul->getOperand(1)) || 13335 MatchURemWithDivisor(Mul->getOperand(0)) || 13336 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) || 13337 MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0))); 13338 return false; 13339 } 13340 13341 const SCEV * 13342 ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) { 13343 SmallVector<BasicBlock*, 16> ExitingBlocks; 13344 L->getExitingBlocks(ExitingBlocks); 13345 13346 // Form an expression for the maximum exit count possible for this loop. We 13347 // merge the max and exact information to approximate a version of 13348 // getConstantMaxBackedgeTakenCount which isn't restricted to just constants. 
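  // The result below is the umin over all exits with a computable count.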
  SmallVector<const SCEV*, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExprs,
/// because we cannot guarantee that the replacement is loop invariant in the
/// loop of the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // If we have LHS == 0, check whether LHS is computing a property of some
    // unknown SCEV %v, and if so rewrite %v to express that property
    // explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          Value *V = LHSUnknown->getValue();
          auto Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
          RewriteMap[V] = Multiple;
          return;
        }
      }
    }

    if (!isa<SCEVUnknown>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // For now, limit to conditions that provide information about unknown
    // expressions.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown)
      return;

    // TODO: use information from more predicates.
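    // For example (hypothetical values): a dominating guard 'x u< 5' lets %x
    // be rewritten to umin(%x, 4) below, and 'x != 0' lets it be rewritten to
    // umax(%x, 1).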
    switch (Predicate) {
    case CmpInst::ICMP_ULT: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;

        RewriteMap[LHSUnknown->getValue()] =
            getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
      }
      break;
    }
    case CmpInst::ICMP_ULE: {
      if (!containsAddRecurrence(RHS)) {
        const SCEV *Base = LHS;
        auto I = RewriteMap.find(LHSUnknown->getValue());
        if (I != RewriteMap.end())
          Base = I->second;
        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
      }
      break;
    }
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewriteMap[LHSUnknown->getValue()] = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewriteMap[LHSUnknown->getValue()] =
            getUMaxExpr(LHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }
  };
  // Starting at the loop predecessor, climb up the predecessor chain as long
  // as we can find predecessors that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    // TODO: use information from more complex conditions, e.g. AND
    // expressions.
    auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
    if (!Cmp)
      continue;

    auto Predicate = Cmp->getPredicate();
    if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
      Predicate = CmpInst::getInversePredicate(Predicate);
    CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}
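// Usage sketch for applyLoopGuards (illustrative only; %n and the guard are
// hypothetical): for a loop guarded by 'if (%n != 0)', applying the guards
// can tighten the backedge-taken count:
//   const SCEV *BTC = SE.getBackedgeTakenCount(L);     // (-1 + %n)
//   const SCEV *Guarded = SE.applyLoopGuards(BTC, L);  // (-1 + umax(%n, 1))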