//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

#ifdef EXPENSIVE_CHECKS
bool llvm::VerifySCEV = true;
#else
bool llvm::VerifySCEV = false;
#endif

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));
"derived loop"), 157 cl::init(100)); 158 159 static cl::opt<bool, true> VerifySCEVOpt( 160 "verify-scev", cl::Hidden, cl::location(VerifySCEV), 161 cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); 162 static cl::opt<bool> VerifySCEVStrict( 163 "verify-scev-strict", cl::Hidden, 164 cl::desc("Enable stricter verification with -verify-scev is passed")); 165 static cl::opt<bool> 166 VerifySCEVMap("verify-scev-maps", cl::Hidden, 167 cl::desc("Verify no dangling value in ScalarEvolution's " 168 "ExprValueMap (slow)")); 169 170 static cl::opt<bool> VerifyIR( 171 "scev-verify-ir", cl::Hidden, 172 cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), 173 cl::init(false)); 174 175 static cl::opt<unsigned> MulOpsInlineThreshold( 176 "scev-mulops-inline-threshold", cl::Hidden, 177 cl::desc("Threshold for inlining multiplication operands into a SCEV"), 178 cl::init(32)); 179 180 static cl::opt<unsigned> AddOpsInlineThreshold( 181 "scev-addops-inline-threshold", cl::Hidden, 182 cl::desc("Threshold for inlining addition operands into a SCEV"), 183 cl::init(500)); 184 185 static cl::opt<unsigned> MaxSCEVCompareDepth( 186 "scalar-evolution-max-scev-compare-depth", cl::Hidden, 187 cl::desc("Maximum depth of recursive SCEV complexity comparisons"), 188 cl::init(32)); 189 190 static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth( 191 "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, 192 cl::desc("Maximum depth of recursive SCEV operations implication analysis"), 193 cl::init(2)); 194 195 static cl::opt<unsigned> MaxValueCompareDepth( 196 "scalar-evolution-max-value-compare-depth", cl::Hidden, 197 cl::desc("Maximum depth of recursive value complexity comparisons"), 198 cl::init(2)); 199 200 static cl::opt<unsigned> 201 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, 202 cl::desc("Maximum depth of recursive arithmetics"), 203 cl::init(32)); 204 205 static cl::opt<unsigned> MaxConstantEvolvingDepth( 206 "scalar-evolution-max-constant-evolving-depth", cl::Hidden, 207 cl::desc("Maximum depth of recursive constant evolving"), cl::init(32)); 208 209 static cl::opt<unsigned> 210 MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, 211 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), 212 cl::init(8)); 213 214 static cl::opt<unsigned> 215 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 216 cl::desc("Max coefficients in AddRec during evolving"), 217 cl::init(8)); 218 219 static cl::opt<unsigned> 220 HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, 221 cl::desc("Size of the expression which is considered huge"), 222 cl::init(4096)); 223 224 static cl::opt<bool> 225 ClassifyExpressions("scalar-evolution-classify-expressions", 226 cl::Hidden, cl::init(true), 227 cl::desc("When printing analysis, include information on every instruction")); 228 229 static cl::opt<bool> UseExpensiveRangeSharpening( 230 "scalar-evolution-use-expensive-range-sharpening", cl::Hidden, 231 cl::init(false), 232 cl::desc("Use more powerful methods of sharpening expression ranges. 
May " 233 "be costly in terms of compile time")); 234 235 static cl::opt<unsigned> MaxPhiSCCAnalysisSize( 236 "scalar-evolution-max-scc-analysis-depth", cl::Hidden, 237 cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " 238 "Phi strongly connected components"), 239 cl::init(8)); 240 241 static cl::opt<bool> 242 EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, 243 cl::desc("Handle <= and >= in finite loops"), 244 cl::init(true)); 245 246 //===----------------------------------------------------------------------===// 247 // SCEV class definitions 248 //===----------------------------------------------------------------------===// 249 250 //===----------------------------------------------------------------------===// 251 // Implementation of the SCEV class. 252 // 253 254 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 255 LLVM_DUMP_METHOD void SCEV::dump() const { 256 print(dbgs()); 257 dbgs() << '\n'; 258 } 259 #endif 260 261 void SCEV::print(raw_ostream &OS) const { 262 switch (getSCEVType()) { 263 case scConstant: 264 cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false); 265 return; 266 case scPtrToInt: { 267 const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this); 268 const SCEV *Op = PtrToInt->getOperand(); 269 OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to " 270 << *PtrToInt->getType() << ")"; 271 return; 272 } 273 case scTruncate: { 274 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this); 275 const SCEV *Op = Trunc->getOperand(); 276 OS << "(trunc " << *Op->getType() << " " << *Op << " to " 277 << *Trunc->getType() << ")"; 278 return; 279 } 280 case scZeroExtend: { 281 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this); 282 const SCEV *Op = ZExt->getOperand(); 283 OS << "(zext " << *Op->getType() << " " << *Op << " to " 284 << *ZExt->getType() << ")"; 285 return; 286 } 287 case scSignExtend: { 288 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this); 289 const SCEV *Op = SExt->getOperand(); 290 OS << "(sext " << *Op->getType() << " " << *Op << " to " 291 << *SExt->getType() << ")"; 292 return; 293 } 294 case scAddRecExpr: { 295 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this); 296 OS << "{" << *AR->getOperand(0); 297 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i) 298 OS << ",+," << *AR->getOperand(i); 299 OS << "}<"; 300 if (AR->hasNoUnsignedWrap()) 301 OS << "nuw><"; 302 if (AR->hasNoSignedWrap()) 303 OS << "nsw><"; 304 if (AR->hasNoSelfWrap() && 305 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW))) 306 OS << "nw><"; 307 AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false); 308 OS << ">"; 309 return; 310 } 311 case scAddExpr: 312 case scMulExpr: 313 case scUMaxExpr: 314 case scSMaxExpr: 315 case scUMinExpr: 316 case scSMinExpr: 317 case scSequentialUMinExpr: { 318 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this); 319 const char *OpStr = nullptr; 320 switch (NAry->getSCEVType()) { 321 case scAddExpr: OpStr = " + "; break; 322 case scMulExpr: OpStr = " * "; break; 323 case scUMaxExpr: OpStr = " umax "; break; 324 case scSMaxExpr: OpStr = " smax "; break; 325 case scUMinExpr: 326 OpStr = " umin "; 327 break; 328 case scSMinExpr: 329 OpStr = " smin "; 330 break; 331 case scSequentialUMinExpr: 332 OpStr = " umin_seq "; 333 break; 334 default: 335 llvm_unreachable("There are no other nary expression types."); 336 } 337 OS << "("; 338 ListSeparator LS(OpStr); 339 for (const SCEV *Op : NAry->operands()) 340 OS << LS << *Op; 341 OS << ")"; 342 
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scSequentialUMinExpr:
    return cast<SCEVSequentialMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Replace the value pointer in case someone is still using this SCEVUnknown.
  setValPtr(New);
}

bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to
// be more efficient.
// If the max analysis depth was reached, return None, indicating that we
// cannot tell whether the two expressions are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recurrences used
    // by the same SCEV, so we can safely sort them by loop header dominance.
    // We require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      assert(DT.dominates(RHead, LHead) &&
             "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
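  // (BC(It, 1) is simply It, so a truncate or zero-extend to the result type
  // is all that is needed.)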
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
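  // For example, with K = 4 and W = 32: K! = 24 = 2^3 * 3, so T = 3 and the
  // odd factor K! / 2^T = 3; the multiplication step then runs at
  // W + T = 35 bits. (T starts at 1 below to account for the factor of two
  // contributed by i = 2, which the loop skips.)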
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
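  // (E.g. if a rewrite has already replaced a pointer operand with its
  // integer equivalent, there is nothing left to cast and we return it
  // unchanged.)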
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
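      // For example, visiting (8 + %p), where %p is a pointer-typed
      // SCEVUnknown, dispatches to visitAddExpr below, which keeps the
      // integer 8 and rewrites %p into (ptrtoint %p).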
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    registerUser(S, Op);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
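  // For example, with Ty == i32, trunc((zext i8 %a to i64) + 42) distributes
  // to (zext i8 %a to i32) + 42: the inner truncate collapses into the
  // existing zext and the constant folds, so no new truncate survives.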
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that the ID is not in the cache,
    // it is possible that it was inserted into the cache during the recursive
    // modifications above. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  registerUser(S, Op);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop
// does not exceed this limit before incrementing.
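// For instance, for an i8 step whose unsigned range is [0, 10], the limit is
// 0 - 10 == 246 with a ULT predicate: any recurrence value u< 246 can be
// incremented by the step without unsigned wrap.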
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or
// something close to it. Typically, if we can prove NSW/NUW for AR, then we
// can just as easily prove NSW/NUW for its preincrement or postincrement
// sibling. This allows normalizing a sign/zero extended AddRec as such:
//   {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
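  // E.g. if Start is the add expression (%x + Step), DiffOps below collects
  // {%x} and PreStart becomes %x.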
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
      ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ...                            (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ...                                  (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ...                                    (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
        static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
C.trunc(TZ).zext(BitWidth) : C; 1575 } 1576 return APInt(BitWidth, 0); 1577 } 1578 1579 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top 1580 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the 1581 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p 1582 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count. 1583 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, 1584 const APInt &ConstantStart, 1585 const SCEV *Step) { 1586 const unsigned BitWidth = ConstantStart.getBitWidth(); 1587 const uint32_t TZ = SE.GetMinTrailingZeros(Step); 1588 if (TZ) 1589 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth) 1590 : ConstantStart; 1591 return APInt(BitWidth, 0); 1592 } 1593 1594 const SCEV * 1595 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1596 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1597 "This is not an extending conversion!"); 1598 assert(isSCEVable(Ty) && 1599 "This is not a conversion to a SCEVable type!"); 1600 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1601 Ty = getEffectiveSCEVType(Ty); 1602 1603 // Fold if the operand is constant. 1604 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1605 return getConstant( 1606 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); 1607 1608 // zext(zext(x)) --> zext(x) 1609 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1610 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1611 1612 // Before doing any expensive analysis, check to see if we've already 1613 // computed a SCEV for this Op and Ty. 1614 FoldingSetNodeID ID; 1615 ID.AddInteger(scZeroExtend); 1616 ID.AddPointer(Op); 1617 ID.AddPointer(Ty); 1618 void *IP = nullptr; 1619 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1620 if (Depth > MaxCastDepth) { 1621 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1622 Op, Ty); 1623 UniqueSCEVs.InsertNode(S, IP); 1624 registerUser(S, Op); 1625 return S; 1626 } 1627 1628 // zext(trunc(x)) --> zext(x) or x or trunc(x) 1629 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1630 // It's possible the bits taken off by the truncate were all zero bits. If 1631 // so, we should be able to simplify this further. 1632 const SCEV *X = ST->getOperand(); 1633 ConstantRange CR = getUnsignedRange(X); 1634 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1635 unsigned NewBits = getTypeSizeInBits(Ty); 1636 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains( 1637 CR.zextOrTrunc(NewBits))) 1638 return getTruncateOrZeroExtend(X, Ty, Depth); 1639 } 1640 1641 // If the input value is a chrec scev, and we can prove that the value 1642 // did not overflow the old, smaller, value, we can zero extend all of the 1643 // operands (often constants). 
This allows analysis of something like 1644 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } 1645 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1646 if (AR->isAffine()) { 1647 const SCEV *Start = AR->getStart(); 1648 const SCEV *Step = AR->getStepRecurrence(*this); 1649 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1650 const Loop *L = AR->getLoop(); 1651 1652 if (!AR->hasNoUnsignedWrap()) { 1653 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1654 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1655 } 1656 1657 // If we have special knowledge that this addrec won't overflow, 1658 // we don't need to do any further analysis. 1659 if (AR->hasNoUnsignedWrap()) { 1660 Start = 1661 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); 1662 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 1663 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1664 } 1665 1666 // Check whether the backedge-taken count is SCEVCouldNotCompute. 1667 // Note that this serves two purposes: It filters out loops that are 1668 // simply not analyzable, and it covers the case where this code is 1669 // being called from within backedge-taken count analysis, such that 1670 // attempting to ask for the backedge-taken count would likely result 1671 // in infinite recursion. In the latter case, the analysis code will 1672 // cope with a conservative value, and it will take care to purge 1673 // that value once it has finished. 1674 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 1675 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 1676 // Manually compute the final value for AR, checking for overflow. 1677 1678 // Check whether the backedge-taken count can be losslessly cast to 1679 // the addrec's type. The count is always unsigned. 1680 const SCEV *CastedMaxBECount = 1681 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 1682 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 1683 CastedMaxBECount, MaxBECount->getType(), Depth); 1684 if (MaxBECount == RecastedMaxBECount) { 1685 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 1686 // Check whether Start+Step*MaxBECount has no unsigned overflow. 1687 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step, 1688 SCEV::FlagAnyWrap, Depth + 1); 1689 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul, 1690 SCEV::FlagAnyWrap, 1691 Depth + 1), 1692 WideTy, Depth + 1); 1693 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1); 1694 const SCEV *WideMaxBECount = 1695 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 1696 const SCEV *OperandExtendedAdd = 1697 getAddExpr(WideStart, 1698 getMulExpr(WideMaxBECount, 1699 getZeroExtendExpr(Step, WideTy, Depth + 1), 1700 SCEV::FlagAnyWrap, Depth + 1), 1701 SCEV::FlagAnyWrap, Depth + 1); 1702 if (ZAdd == OperandExtendedAdd) { 1703 // Cache knowledge of AR NUW, which is propagated to this AddRec. 1704 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); 1705 // Return the expression with the addrec on the outside. 1706 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1707 Depth + 1); 1708 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 1709 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1710 } 1711 // Similar to above, only this time treat the step value as signed. 1712 // This covers loops that count down.
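// (Illustrative sketch, not from the original source: for the i8 recurrence
// {100,+,-1} with MaxBECount = 100, treating the step as unsigned makes
// Start + Step*MaxBECount appear to wrap, but with the step sign-extended
// the i16 evaluation 100 + (-1)*100 == 0 matches ZAdd, which is what proves
// the recurrence cannot self-wrap.)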
1713 OperandExtendedAdd = 1714 getAddExpr(WideStart, 1715 getMulExpr(WideMaxBECount, 1716 getSignExtendExpr(Step, WideTy, Depth + 1), 1717 SCEV::FlagAnyWrap, Depth + 1), 1718 SCEV::FlagAnyWrap, Depth + 1); 1719 if (ZAdd == OperandExtendedAdd) { 1720 // Cache knowledge of AR NW, which is propagated to this AddRec. 1721 // Negative step causes unsigned wrap, but it still can't self-wrap. 1722 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 1723 // Return the expression with the addrec on the outside. 1724 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1725 Depth + 1); 1726 Step = getSignExtendExpr(Step, Ty, Depth + 1); 1727 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1728 } 1729 } 1730 } 1731 1732 // Normally, in the cases we can prove no-overflow via a 1733 // backedge guarding condition, we can also compute a backedge 1734 // taken count for the loop. The exceptions are assumptions and 1735 // guards present in the loop -- SCEV is not great at exploiting 1736 // these to compute max backedge taken counts, but can still use 1737 // these to prove lack of overflow. Use this fact to avoid 1738 // doing extra work that may not pay off. 1739 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards || 1740 !AC.assumptions().empty()) { 1741 1742 auto NewFlags = proveNoUnsignedWrapViaInduction(AR); 1743 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1744 if (AR->hasNoUnsignedWrap()) { 1745 // Same as nuw case above - duplicated here to avoid a compile time 1746 // issue. It's not clear that the order of checks matters, but 1747 // it's one of two possible causes for a change which was 1748 // reverted. Be conservative for the moment. 1749 Start = 1750 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); 1751 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 1752 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1753 } 1754 1755 // For a negative step, we can extend the operands iff doing so only 1756 // traverses values in the range zext([0,UINT_MAX]). 1757 if (isKnownNegative(Step)) { 1758 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1759 getSignedRangeMin(Step)); 1760 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1761 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) { 1762 // Cache knowledge of AR NW, which is propagated to this 1763 // AddRec. Negative step causes unsigned wrap, but it 1764 // still can't self-wrap. 1765 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 1766 // Return the expression with the addrec on the outside.
1767 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 1768 Depth + 1); 1769 Step = getSignExtendExpr(Step, Ty, Depth + 1); 1770 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1771 } 1772 } 1773 } 1774 1775 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw> 1776 // if D + (C - D + Step * n) could be proven to not unsigned wrap 1777 // where D maximizes the number of trailing zeros of (C - D + Step * n) 1778 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 1779 const APInt &C = SC->getAPInt(); 1780 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 1781 if (D != 0) { 1782 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1783 const SCEV *SResidual = 1784 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 1785 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1786 return getAddExpr(SZExtD, SZExtR, 1787 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1788 Depth + 1); 1789 } 1790 } 1791 1792 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) { 1793 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW); 1794 Start = 1795 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1); 1796 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 1797 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 1798 } 1799 } 1800 1801 // zext(A % B) --> zext(A) % zext(B) 1802 { 1803 const SCEV *LHS; 1804 const SCEV *RHS; 1805 if (matchURem(Op, LHS, RHS)) 1806 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1), 1807 getZeroExtendExpr(RHS, Ty, Depth + 1)); 1808 } 1809 1810 // zext(A / B) --> zext(A) / zext(B). 1811 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op)) 1812 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1), 1813 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1)); 1814 1815 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1816 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw> 1817 if (SA->hasNoUnsignedWrap()) { 1818 // If the addition does not unsign overflow then we can, by definition, 1819 // commute the zero extension with the addition operation. 1820 SmallVector<const SCEV *, 4> Ops; 1821 for (const auto *Op : SA->operands()) 1822 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1823 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1); 1824 } 1825 1826 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...)) 1827 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap 1828 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1829 // 1830 // Often address arithmetics contain expressions like 1831 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). 1832 // This transformation is useful while proving that such expressions are 1833 // equal or differ by a small constant amount, see LoadStoreVectorizer pass. 
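// (Worked example, added for exposition: in the (zext (5 + (4 * X)))
// instance above, the non-constant part 4 * X has two trailing zero bits,
// so D = 5 & 3 = 1 and the rewrite yields zext(1) + zext(4 + 4 * X);
// adding D back only fills zero low bits of the residual, so the top-level
// add cannot wrap in either sense.)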
1834 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1835 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1836 if (D != 0) { 1837 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth); 1838 const SCEV *SResidual = 1839 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1840 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1); 1841 return getAddExpr(SZExtD, SZExtR, 1842 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1843 Depth + 1); 1844 } 1845 } 1846 } 1847 1848 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) { 1849 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw> 1850 if (SM->hasNoUnsignedWrap()) { 1851 // If the multiply does not unsign overflow then we can, by definition, 1852 // commute the zero extension with the multiply operation. 1853 SmallVector<const SCEV *, 4> Ops; 1854 for (const auto *Op : SM->operands()) 1855 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1)); 1856 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1); 1857 } 1858 1859 // zext(2^K * (trunc X to iN)) to iM -> 1860 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw> 1861 // 1862 // Proof: 1863 // 1864 // zext(2^K * (trunc X to iN)) to iM 1865 // = zext((trunc X to iN) << K) to iM 1866 // = zext((trunc X to i{N-K}) << K)<nuw> to iM 1867 // (because shl removes the top K bits) 1868 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM 1869 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>. 1870 // 1871 if (SM->getNumOperands() == 2) 1872 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0))) 1873 if (MulLHS->getAPInt().isPowerOf2()) 1874 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) { 1875 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) - 1876 MulLHS->getAPInt().logBase2(); 1877 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits); 1878 return getMulExpr( 1879 getZeroExtendExpr(MulLHS, Ty), 1880 getZeroExtendExpr( 1881 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty), 1882 SCEV::FlagNUW, Depth + 1); 1883 } 1884 } 1885 1886 // The cast wasn't folded; create an explicit cast node. 1887 // Recompute the insert position, as it may have been invalidated. 1888 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1889 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1890 Op, Ty); 1891 UniqueSCEVs.InsertNode(S, IP); 1892 registerUser(S, Op); 1893 return S; 1894 } 1895 1896 const SCEV * 1897 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) { 1898 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1899 "This is not an extending conversion!"); 1900 assert(isSCEVable(Ty) && 1901 "This is not a conversion to a SCEVable type!"); 1902 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!"); 1903 Ty = getEffectiveSCEVType(Ty); 1904 1905 // Fold if the operand is constant. 1906 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1907 return getConstant( 1908 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); 1909 1910 // sext(sext(x)) --> sext(x) 1911 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 1912 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1); 1913 1914 // sext(zext(x)) --> zext(x) 1915 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 1916 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1); 1917 1918 // Before doing any expensive analysis, check to see if we've already 1919 // computed a SCEV for this Op and Ty. 
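// (Note added for exposition: the FoldingSetNodeID below keys the cache on
// the triple (scSignExtend, Op, Ty), so the fold attempts that follow run
// at most once per operand/type pair; later queries return the cached node.)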
1920 FoldingSetNodeID ID; 1921 ID.AddInteger(scSignExtend); 1922 ID.AddPointer(Op); 1923 ID.AddPointer(Ty); 1924 void *IP = nullptr; 1925 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1926 // Limit recursion depth. 1927 if (Depth > MaxCastDepth) { 1928 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1929 Op, Ty); 1930 UniqueSCEVs.InsertNode(S, IP); 1931 registerUser(S, Op); 1932 return S; 1933 } 1934 1935 // sext(trunc(x)) --> sext(x) or x or trunc(x) 1936 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { 1937 // It's possible the bits taken off by the truncate were all sign bits. If 1938 // so, we should be able to simplify this further. 1939 const SCEV *X = ST->getOperand(); 1940 ConstantRange CR = getSignedRange(X); 1941 unsigned TruncBits = getTypeSizeInBits(ST->getType()); 1942 unsigned NewBits = getTypeSizeInBits(Ty); 1943 if (CR.truncate(TruncBits).signExtend(NewBits).contains( 1944 CR.sextOrTrunc(NewBits))) 1945 return getTruncateOrSignExtend(X, Ty, Depth); 1946 } 1947 1948 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) { 1949 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 1950 if (SA->hasNoSignedWrap()) { 1951 // If the addition does not sign overflow then we can, by definition, 1952 // commute the sign extension with the addition operation. 1953 SmallVector<const SCEV *, 4> Ops; 1954 for (const auto *Op : SA->operands()) 1955 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1)); 1956 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1); 1957 } 1958 1959 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...)) 1960 // if D + (C - D + x + y + ...) could be proven to not signed wrap 1961 // where D maximizes the number of trailing zeros of (C - D + x + y + ...) 1962 // 1963 // For instance, this will bring two seemingly different expressions: 1964 // 1 + sext(5 + 20 * %x + 24 * %y) and 1965 // sext(6 + 20 * %x + 24 * %y) 1966 // to the same form: 1967 // 2 + sext(4 + 20 * %x + 24 * %y) 1968 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) { 1969 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA); 1970 if (D != 0) { 1971 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 1972 const SCEV *SResidual = 1973 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth); 1974 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 1975 return getAddExpr(SSExtD, SSExtR, 1976 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 1977 Depth + 1); 1978 } 1979 } 1980 } 1981 // If the input value is a chrec scev, and we can prove that the value 1982 // did not overflow the old, smaller, value, we can sign extend all of the 1983 // operands (often constants). This allows analysis of something like 1984 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } 1985 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) 1986 if (AR->isAffine()) { 1987 const SCEV *Start = AR->getStart(); 1988 const SCEV *Step = AR->getStepRecurrence(*this); 1989 unsigned BitWidth = getTypeSizeInBits(AR->getType()); 1990 const Loop *L = AR->getLoop(); 1991 1992 if (!AR->hasNoSignedWrap()) { 1993 auto NewFlags = proveNoWrapViaConstantRanges(AR); 1994 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 1995 } 1996 1997 // If we have special knowledge that this addrec won't overflow, 1998 // we don't need to do any further analysis. 
1999 if (AR->hasNoSignedWrap()) { 2000 Start = 2001 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); 2002 Step = getSignExtendExpr(Step, Ty, Depth + 1); 2003 return getAddRecExpr(Start, Step, L, SCEV::FlagNSW); 2004 } 2005 2006 // Check whether the backedge-taken count is SCEVCouldNotCompute. 2007 // Note that this serves two purposes: It filters out loops that are 2008 // simply not analyzable, and it covers the case where this code is 2009 // being called from within backedge-taken count analysis, such that 2010 // attempting to ask for the backedge-taken count would likely result 2011 // in infinite recursion. In the latter case, the analysis code will 2012 // cope with a conservative value, and it will take care to purge 2013 // that value once it has finished. 2014 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L); 2015 if (!isa<SCEVCouldNotCompute>(MaxBECount)) { 2016 // Manually compute the final value for AR, checking for 2017 // overflow. 2018 2019 // Check whether the backedge-taken count can be losslessly cast to 2020 // the addrec's type. The count is always unsigned. 2021 const SCEV *CastedMaxBECount = 2022 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth); 2023 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend( 2024 CastedMaxBECount, MaxBECount->getType(), Depth); 2025 if (MaxBECount == RecastedMaxBECount) { 2026 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2); 2027 // Check whether Start+Step*MaxBECount has no signed overflow. 2028 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step, 2029 SCEV::FlagAnyWrap, Depth + 1); 2030 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul, 2031 SCEV::FlagAnyWrap, 2032 Depth + 1), 2033 WideTy, Depth + 1); 2034 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1); 2035 const SCEV *WideMaxBECount = 2036 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1); 2037 const SCEV *OperandExtendedAdd = 2038 getAddExpr(WideStart, 2039 getMulExpr(WideMaxBECount, 2040 getSignExtendExpr(Step, WideTy, Depth + 1), 2041 SCEV::FlagAnyWrap, Depth + 1), 2042 SCEV::FlagAnyWrap, Depth + 1); 2043 if (SAdd == OperandExtendedAdd) { 2044 // Cache knowledge of AR NSW, which is propagated to this AddRec. 2045 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); 2046 // Return the expression with the addrec on the outside. 2047 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2048 Depth + 1); 2049 Step = getSignExtendExpr(Step, Ty, Depth + 1); 2050 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 2051 } 2052 // Similar to above, only this time treat the step value as unsigned. 2053 // This covers loops that count up with an unsigned step. 2054 OperandExtendedAdd = 2055 getAddExpr(WideStart, 2056 getMulExpr(WideMaxBECount, 2057 getZeroExtendExpr(Step, WideTy, Depth + 1), 2058 SCEV::FlagAnyWrap, Depth + 1), 2059 SCEV::FlagAnyWrap, Depth + 1); 2060 if (SAdd == OperandExtendedAdd) { 2061 // If AR wraps around then 2062 // 2063 // abs(Step) * MaxBECount > unsigned-max(AR->getType()) 2064 // => SAdd != OperandExtendedAdd 2065 // 2066 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=> 2067 // (SAdd == OperandExtendedAdd => AR is NW) 2068 2069 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW); 2070 2071 // Return the expression with the addrec on the outside.
2072 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, 2073 Depth + 1); 2074 Step = getZeroExtendExpr(Step, Ty, Depth + 1); 2075 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 2076 } 2077 } 2078 } 2079 2080 auto NewFlags = proveNoSignedWrapViaInduction(AR); 2081 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags); 2082 if (AR->hasNoSignedWrap()) { 2083 // Same as nsw case above - duplicated here to avoid a compile time 2084 // issue. It's not clear that the order of checks matters, but 2085 // it's one of two possible causes for a change which was 2086 // reverted. Be conservative for the moment. 2087 Start = 2088 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); 2089 Step = getSignExtendExpr(Step, Ty, Depth + 1); 2090 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 2091 } 2092 2093 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw> 2094 // if D + (C - D + Step * n) could be proven to not signed wrap 2095 // where D maximizes the number of trailing zeros of (C - D + Step * n) 2096 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) { 2097 const APInt &C = SC->getAPInt(); 2098 const APInt &D = extractConstantWithoutWrapping(*this, C, Step); 2099 if (D != 0) { 2100 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth); 2101 const SCEV *SResidual = 2102 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags()); 2103 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1); 2104 return getAddExpr(SSExtD, SSExtR, 2105 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW), 2106 Depth + 1); 2107 } 2108 } 2109 2110 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) { 2111 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW); 2112 Start = 2113 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1); 2114 Step = getSignExtendExpr(Step, Ty, Depth + 1); 2115 return getAddRecExpr(Start, Step, L, AR->getNoWrapFlags()); 2116 } 2117 } 2118 2119 // If the input value is provably positive and we could not simplify 2120 // away the sext, build a zext instead. 2121 if (isKnownNonNegative(Op)) 2122 return getZeroExtendExpr(Op, Ty, Depth + 1); 2123 2124 // The cast wasn't folded; create an explicit cast node. 2125 // Recompute the insert position, as it may have been invalidated. 2126 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2127 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 2128 Op, Ty); 2129 UniqueSCEVs.InsertNode(S, IP); 2130 registerUser(S, { Op }); 2131 return S; 2132 } 2133 2134 const SCEV *ScalarEvolution::getCastExpr(SCEVTypes Kind, const SCEV *Op, 2135 Type *Ty) { 2136 switch (Kind) { 2137 case scTruncate: 2138 return getTruncateExpr(Op, Ty); 2139 case scZeroExtend: 2140 return getZeroExtendExpr(Op, Ty); 2141 case scSignExtend: 2142 return getSignExtendExpr(Op, Ty); 2143 case scPtrToInt: 2144 return getPtrToIntExpr(Op, Ty); 2145 default: 2146 llvm_unreachable("Not a SCEV cast expression!"); 2147 } 2148 } 2149 2150 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 2151 /// unspecified bits out to the given type. 2152 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 2153 Type *Ty) { 2154 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 2155 "This is not an extending conversion!"); 2156 assert(isSCEVable(Ty) && 2157 "This is not a conversion to a SCEVable type!"); 2158 Ty = getEffectiveSCEVType(Ty); 2159 2160 // Sign-extend negative constants.
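// (Illustrative example: anyext(i8 -1) is modeled as a sext, which folds to
// the constant -1 of the wider type; a zext would instead produce 255 and
// hide the operand's sign from later folds.)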
2161 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 2162 if (SC->getAPInt().isNegative()) 2163 return getSignExtendExpr(Op, Ty); 2164 2165 // Peel off a truncate cast. 2166 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 2167 const SCEV *NewOp = T->getOperand(); 2168 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 2169 return getAnyExtendExpr(NewOp, Ty); 2170 return getTruncateOrNoop(NewOp, Ty); 2171 } 2172 2173 // Next try a zext cast. If the cast is folded, use it. 2174 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 2175 if (!isa<SCEVZeroExtendExpr>(ZExt)) 2176 return ZExt; 2177 2178 // Next try a sext cast. If the cast is folded, use it. 2179 const SCEV *SExt = getSignExtendExpr(Op, Ty); 2180 if (!isa<SCEVSignExtendExpr>(SExt)) 2181 return SExt; 2182 2183 // Force the cast to be folded into the operands of an addrec. 2184 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 2185 SmallVector<const SCEV *, 4> Ops; 2186 for (const SCEV *Op : AR->operands()) 2187 Ops.push_back(getAnyExtendExpr(Op, Ty)); 2188 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW); 2189 } 2190 2191 // If the expression is obviously signed, use the sext cast value. 2192 if (isa<SCEVSMaxExpr>(Op)) 2193 return SExt; 2194 2195 // Absent any other information, use the zext cast value. 2196 return ZExt; 2197 } 2198 2199 /// Process the given Ops list, which is a list of operands to be added under 2200 /// the given scale, and update the given map. This is a helper function for 2201 /// getAddExpr. As an example of what it does, given a sequence of operands 2202 /// that would form an add expression like this: 2203 /// 2204 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r) 2205 /// 2206 /// where A and B are constants, update the map with these values: 2207 /// 2208 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 2209 /// 2210 /// and add 13 + A*B*29 to AccumulatedConstant. 2211 /// This will allow getAddExpr to produce this: 2212 /// 2213 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 2214 /// 2215 /// This form often exposes folding opportunities that are hidden in 2216 /// the original operand list. 2217 /// 2218 /// Return true iff it appears that any interesting folding opportunities 2219 /// may be exposed. This helps getAddExpr short-circuit extra work in 2220 /// the common case where no interesting opportunities are present, and 2221 /// is also used as a check to avoid infinite recursion. 2222 static bool 2223 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 2224 SmallVectorImpl<const SCEV *> &NewOps, 2225 APInt &AccumulatedConstant, 2226 const SCEV *const *Ops, size_t NumOperands, 2227 const APInt &Scale, 2228 ScalarEvolution &SE) { 2229 bool Interesting = false; 2230 2231 // Iterate over the add operands. They are sorted, with constants first. 2232 unsigned i = 0; 2233 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2234 ++i; 2235 // Pull a buried constant out to the outside. 2236 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 2237 Interesting = true; 2238 AccumulatedConstant += Scale * C->getAPInt(); 2239 } 2240 2241 // Next comes everything else. We're especially interested in multiplies 2242 // here, but they're in the middle, so just visit the rest with one loop.
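// (Illustrative trace, not from the original source: visiting the operand
// 3 * (%q + 29) under Scale = A recurses into (%q + 29) with the scale
// 3*A, folds 29 * 3*A into AccumulatedConstant, and records (%q, 3*A) in
// the map, mirroring the (q, A*B) entry in the example above.)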
2243 for (; i != NumOperands; ++i) { 2244 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 2245 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 2246 APInt NewScale = 2247 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); 2248 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 2249 // A multiplication of a constant with another add; recurse. 2250 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 2251 Interesting |= 2252 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2253 Add->op_begin(), Add->getNumOperands(), 2254 NewScale, SE); 2255 } else { 2256 // A multiplication of a constant with some other value. Update 2257 // the map. 2258 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands())); 2259 const SCEV *Key = SE.getMulExpr(MulOps); 2260 auto Pair = M.insert({Key, NewScale}); 2261 if (Pair.second) { 2262 NewOps.push_back(Pair.first->first); 2263 } else { 2264 Pair.first->second += NewScale; 2265 // The map already had an entry for this value, which may indicate 2266 // a folding opportunity. 2267 Interesting = true; 2268 } 2269 } 2270 } else { 2271 // An ordinary operand. Update the map. 2272 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 2273 M.insert({Ops[i], Scale}); 2274 if (Pair.second) { 2275 NewOps.push_back(Pair.first->first); 2276 } else { 2277 Pair.first->second += Scale; 2278 // The map already had an entry for this value, which may indicate 2279 // a folding opportunity. 2280 Interesting = true; 2281 } 2282 } 2283 } 2284 2285 return Interesting; 2286 } 2287 2288 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, 2289 const SCEV *LHS, const SCEV *RHS) { 2290 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *, 2291 SCEV::NoWrapFlags, unsigned); 2292 switch (BinOp) { 2293 default: 2294 llvm_unreachable("Unsupported binary op"); 2295 case Instruction::Add: 2296 Operation = &ScalarEvolution::getAddExpr; 2297 break; 2298 case Instruction::Sub: 2299 Operation = &ScalarEvolution::getMinusSCEV; 2300 break; 2301 case Instruction::Mul: 2302 Operation = &ScalarEvolution::getMulExpr; 2303 break; 2304 } 2305 2306 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) = 2307 Signed ? 
&ScalarEvolution::getSignExtendExpr 2308 : &ScalarEvolution::getZeroExtendExpr; 2309 2310 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) 2311 auto *NarrowTy = cast<IntegerType>(LHS->getType()); 2312 auto *WideTy = 2313 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); 2314 2315 const SCEV *A = (this->*Extension)( 2316 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0); 2317 const SCEV *LHSB = (this->*Extension)(LHS, WideTy, 0); 2318 const SCEV *RHSB = (this->*Extension)(RHS, WideTy, 0); 2319 const SCEV *B = (this->*Operation)(LHSB, RHSB, SCEV::FlagAnyWrap, 0); 2320 return A == B; 2321 } 2322 2323 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/> 2324 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp( 2325 const OverflowingBinaryOperator *OBO) { 2326 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap; 2327 2328 if (OBO->hasNoUnsignedWrap()) 2329 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2330 if (OBO->hasNoSignedWrap()) 2331 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2332 2333 bool Deduced = false; 2334 2335 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap()) 2336 return {Flags, Deduced}; 2337 2338 if (OBO->getOpcode() != Instruction::Add && 2339 OBO->getOpcode() != Instruction::Sub && 2340 OBO->getOpcode() != Instruction::Mul) 2341 return {Flags, Deduced}; 2342 2343 const SCEV *LHS = getSCEV(OBO->getOperand(0)); 2344 const SCEV *RHS = getSCEV(OBO->getOperand(1)); 2345 2346 if (!OBO->hasNoUnsignedWrap() && 2347 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2348 /* Signed */ false, LHS, RHS)) { 2349 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2350 Deduced = true; 2351 } 2352 2353 if (!OBO->hasNoSignedWrap() && 2354 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(), 2355 /* Signed */ true, LHS, RHS)) { 2356 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2357 Deduced = true; 2358 } 2359 2360 return {Flags, Deduced}; 2361 } 2362 2363 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and 2364 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of 2365 // can't-overflow flags for the operation if possible. 2366 static SCEV::NoWrapFlags 2367 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, 2368 const ArrayRef<const SCEV *> Ops, 2369 SCEV::NoWrapFlags Flags) { 2370 using namespace std::placeholders; 2371 2372 using OBO = OverflowingBinaryOperator; 2373 2374 bool CanAnalyze = 2375 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr; 2376 (void)CanAnalyze; 2377 assert(CanAnalyze && "don't call from other places!"); 2378 2379 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW; 2380 SCEV::NoWrapFlags SignOrUnsignWrap = 2381 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2382 2383 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW. 
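// (Illustrative example: if %a and %b are zext'd from i8, each lies in
// [0, 255]; an add<nsw> of such operands never passes the signed maximum,
// and a sum of non-negative values that stays at or below the signed
// maximum cannot wrap unsigned either, so FlagNUW may be added.)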
2384 auto IsKnownNonNegative = [&](const SCEV *S) { 2385 return SE->isKnownNonNegative(S); 2386 }; 2387 2388 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative)) 2389 Flags = 2390 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask); 2391 2392 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask); 2393 2394 if (SignOrUnsignWrap != SignOrUnsignMask && 2395 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 && 2396 isa<SCEVConstant>(Ops[0])) { 2397 2398 auto Opcode = [&] { 2399 switch (Type) { 2400 case scAddExpr: 2401 return Instruction::Add; 2402 case scMulExpr: 2403 return Instruction::Mul; 2404 default: 2405 llvm_unreachable("Unexpected SCEV op."); 2406 } 2407 }(); 2408 2409 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt(); 2410 2411 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow. 2412 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) { 2413 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2414 Opcode, C, OBO::NoSignedWrap); 2415 if (NSWRegion.contains(SE->getSignedRange(Ops[1]))) 2416 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 2417 } 2418 2419 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow. 2420 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) { 2421 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion( 2422 Opcode, C, OBO::NoUnsignedWrap); 2423 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1]))) 2424 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2425 } 2426 } 2427 2428 // <0,+,nonnegative><nw> is also nuw 2429 // TODO: Add corresponding nsw case 2430 if (Type == scAddRecExpr && ScalarEvolution::hasFlags(Flags, SCEV::FlagNW) && 2431 !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && Ops.size() == 2 && 2432 Ops[0]->isZero() && IsKnownNonNegative(Ops[1])) 2433 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2434 2435 // both (udiv X, Y) * Y and Y * (udiv X, Y) are always NUW 2436 if (Type == scMulExpr && !ScalarEvolution::hasFlags(Flags, SCEV::FlagNUW) && 2437 Ops.size() == 2) { 2438 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0])) 2439 if (UDiv->getOperand(1) == Ops[1]) 2440 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2441 if (auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1])) 2442 if (UDiv->getOperand(1) == Ops[0]) 2443 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 2444 } 2445 2446 return Flags; 2447 } 2448 2449 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) { 2450 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader()); 2451 } 2452 2453 /// Get a canonical add expression, or something simpler if possible. 2454 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 2455 SCEV::NoWrapFlags OrigFlags, 2456 unsigned Depth) { 2457 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) && 2458 "only nuw or nsw allowed"); 2459 assert(!Ops.empty() && "Cannot get empty add!"); 2460 if (Ops.size() == 1) return Ops[0]; 2461 #ifndef NDEBUG 2462 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2463 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2464 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2465 "SCEVAddExpr operand types don't match!"); 2466 unsigned NumPtrs = count_if( 2467 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); }); 2468 assert(NumPtrs <= 1 && "add has at most one pointer operand"); 2469 #endif 2470 2471 // Sort by complexity, this groups all similar expression types together. 
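// (Illustrative example: an operand list such as (%x, 3, {0,+,1}, 5) is
// reordered so the constants 3 and 5 sit adjacent at the front, letting
// the loop below fold them into a single 8 before any costlier matching.)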
2472 GroupByComplexity(Ops, &LI, DT); 2473 2474 // If there are any constants, fold them together. 2475 unsigned Idx = 0; 2476 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2477 ++Idx; 2478 assert(Idx < Ops.size()); 2479 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2480 // We found two constants, fold them together! 2481 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); 2482 if (Ops.size() == 2) return Ops[0]; 2483 Ops.erase(Ops.begin()+1); // Erase the folded element 2484 LHSC = cast<SCEVConstant>(Ops[0]); 2485 } 2486 2487 // If we are left with a constant zero being added, strip it off. 2488 if (LHSC->getValue()->isZero()) { 2489 Ops.erase(Ops.begin()); 2490 --Idx; 2491 } 2492 2493 if (Ops.size() == 1) return Ops[0]; 2494 } 2495 2496 // Delay expensive flag strengthening until necessary. 2497 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) { 2498 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags); 2499 }; 2500 2501 // Limit recursion calls depth. 2502 if (Depth > MaxArithDepth || hasHugeExpression(Ops)) 2503 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2504 2505 if (SCEV *S = findExistingSCEVInCache(scAddExpr, Ops)) { 2506 // Don't strengthen flags if we have no new information. 2507 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S); 2508 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags) 2509 Add->setNoWrapFlags(ComputeFlags(Ops)); 2510 return S; 2511 } 2512 2513 // Okay, check to see if the same value occurs in the operand list more than 2514 // once. If so, merge them together into a multiply expression. Since we 2515 // sorted the list, these values are required to be adjacent. 2516 Type *Ty = Ops[0]->getType(); 2517 bool FoundMatch = false; 2518 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 2519 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 2520 // Scan ahead to count how many equal operands there are. 2521 unsigned Count = 2; 2522 while (i+Count != e && Ops[i+Count] == Ops[i]) 2523 ++Count; 2524 // Merge the values into a multiply. 2525 const SCEV *Scale = getConstant(Ty, Count); 2526 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1); 2527 if (Ops.size() == Count) 2528 return Mul; 2529 Ops[i] = Mul; 2530 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 2531 --i; e -= Count - 1; 2532 FoundMatch = true; 2533 } 2534 if (FoundMatch) 2535 return getAddExpr(Ops, OrigFlags, Depth + 1); 2536 2537 // Check for truncates. If all the operands are truncated from the same 2538 // type, see if factoring out the truncate would permit the result to be 2539 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(ext(n)*x + ext(m)*y) 2540 // if the contents of the resulting outer trunc fold to something simple. 2541 auto FindTruncSrcType = [&]() -> Type * { 2542 // We're ultimately looking to fold an addrec of truncs and muls of only 2543 // constants and truncs, so if we find any other types of SCEV 2544 // as operands of the addrec then we bail and return nullptr here. 2545 // Otherwise, we return the type of the operand of a trunc that we find.
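// (Illustrative example: for trunc(i64 %x to i32) + 2 * trunc(i64 %y to
// i32) this returns i64; the sum is then re-evaluated as
// trunc(%x + ext(2) * %y) and kept only if the wide add folds to
// something simple.)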
2546 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx])) 2547 return T->getOperand()->getType(); 2548 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 2549 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1); 2550 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp)) 2551 return T->getOperand()->getType(); 2552 } 2553 return nullptr; 2554 }; 2555 if (auto *SrcType = FindTruncSrcType()) { 2556 SmallVector<const SCEV *, 8> LargeOps; 2557 bool Ok = true; 2558 // Check all the operands to see if they can be represented in the 2559 // source type of the truncate. 2560 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 2561 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 2562 if (T->getOperand()->getType() != SrcType) { 2563 Ok = false; 2564 break; 2565 } 2566 LargeOps.push_back(T->getOperand()); 2567 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 2568 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 2569 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 2570 SmallVector<const SCEV *, 8> LargeMulOps; 2571 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 2572 if (const SCEVTruncateExpr *T = 2573 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 2574 if (T->getOperand()->getType() != SrcType) { 2575 Ok = false; 2576 break; 2577 } 2578 LargeMulOps.push_back(T->getOperand()); 2579 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) { 2580 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 2581 } else { 2582 Ok = false; 2583 break; 2584 } 2585 } 2586 if (Ok) 2587 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1)); 2588 } else { 2589 Ok = false; 2590 break; 2591 } 2592 } 2593 if (Ok) { 2594 // Evaluate the expression in the larger type. 2595 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1); 2596 // If it folds to something simple, use it. Otherwise, don't. 2597 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 2598 return getTruncateExpr(Fold, Ty); 2599 } 2600 } 2601 2602 if (Ops.size() == 2) { 2603 // Check if we have an expression of the form ((X + C1) - C2), where C1 and 2604 // C2 can be folded in a way that allows retaining wrapping flags of (X + 2605 // C1). 2606 const SCEV *A = Ops[0]; 2607 const SCEV *B = Ops[1]; 2608 auto *AddExpr = dyn_cast<SCEVAddExpr>(B); 2609 auto *C = dyn_cast<SCEVConstant>(A); 2610 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) { 2611 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt(); 2612 auto C2 = C->getAPInt(); 2613 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap; 2614 2615 APInt ConstAdd = C1 + C2; 2616 auto AddFlags = AddExpr->getNoWrapFlags(); 2617 // Adding a smaller constant is NUW if the original AddExpr was NUW. 2618 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && 2619 ConstAdd.ule(C1)) { 2620 PreservedFlags = 2621 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); 2622 } 2623 2624 // Adding a constant with the same sign and small magnitude is NSW, if the 2625 // original AddExpr was NSW. 
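// (Concrete example, added for exposition: for ((%x + -10)<nsw> + 3),
// ConstAdd = -7 has the same sign as C1 = -10 and a smaller magnitude, so
// any %x for which %x + -10 avoided signed wrap also keeps %x + -7 in
// range, and FlagNSW survives on the rewritten add.)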
2626 if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) && 2627 C1.isSignBitSet() == ConstAdd.isSignBitSet() && 2628 ConstAdd.abs().ule(C1.abs())) { 2629 PreservedFlags = 2630 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW); 2631 } 2632 2633 if (PreservedFlags != SCEV::FlagAnyWrap) { 2634 SmallVector<const SCEV *, 4> NewOps(AddExpr->operands()); 2635 NewOps[0] = getConstant(ConstAdd); 2636 return getAddExpr(NewOps, PreservedFlags); 2637 } 2638 } 2639 } 2640 2641 // Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y) 2642 if (Ops.size() == 2) { 2643 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[0]); 2644 if (Mul && Mul->getNumOperands() == 2 && 2645 Mul->getOperand(0)->isAllOnesValue()) { 2646 const SCEV *X; 2647 const SCEV *Y; 2648 if (matchURem(Mul->getOperand(1), X, Y) && X == Ops[1]) { 2649 return getMulExpr(Y, getUDivExpr(X, Y)); 2650 } 2651 } 2652 } 2653 2654 // Skip past any other cast SCEVs. 2655 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 2656 ++Idx; 2657 2658 // If there are add operands they would be next. 2659 if (Idx < Ops.size()) { 2660 bool DeletedAdd = false; 2661 // If the original flags and all inlined SCEVAddExprs are NUW, use the 2662 // common NUW flag for expression after inlining. Other flags cannot be 2663 // preserved, because they may depend on the original order of operations. 2664 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW); 2665 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 2666 if (Ops.size() > AddOpsInlineThreshold || 2667 Add->getNumOperands() > AddOpsInlineThreshold) 2668 break; 2669 // If we have an add, expand the add operands onto the end of the operands 2670 // list. 2671 Ops.erase(Ops.begin()+Idx); 2672 Ops.append(Add->op_begin(), Add->op_end()); 2673 DeletedAdd = true; 2674 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags()); 2675 } 2676 2677 // If we deleted at least one add, we added operands to the end of the list, 2678 // and they are not necessarily sorted. Recurse to resort and resimplify 2679 // any operands we just acquired. 2680 if (DeletedAdd) 2681 return getAddExpr(Ops, CommonFlags, Depth + 1); 2682 } 2683 2684 // Skip over the add expression until we get to a multiply. 2685 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 2686 ++Idx; 2687 2688 // Check to see if there are any folding opportunities present with 2689 // operands multiplied by constant values. 2690 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 2691 uint64_t BitWidth = getTypeSizeInBits(Ty); 2692 DenseMap<const SCEV *, APInt> M; 2693 SmallVector<const SCEV *, 8> NewOps; 2694 APInt AccumulatedConstant(BitWidth, 0); 2695 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 2696 Ops.data(), Ops.size(), 2697 APInt(BitWidth, 1), *this)) { 2698 struct APIntCompare { 2699 bool operator()(const APInt &LHS, const APInt &RHS) const { 2700 return LHS.ult(RHS); 2701 } 2702 }; 2703 2704 // Some interesting folding opportunity is present, so it's worthwhile to 2705 // re-generate the operands list. Group the operands by constant scale, 2706 // to avoid multiplying by the same constant scale multiple times. 2707 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 2708 for (const SCEV *NewOp : NewOps) 2709 MulOpLists[M.find(NewOp)->second].push_back(NewOp); 2710 // Re-generate the operands list.
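// (Illustrative example: if the scale map ends up as {(%a, 2), (%b, 2),
// (%c, 3)}, MulOpLists groups 2 -> [%a, %b] and 3 -> [%c], and the sum is
// rebuilt as 2*(%a + %b) + 3*%c, multiplying by each scale only once.)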
2711 Ops.clear(); 2712 if (AccumulatedConstant != 0) 2713 Ops.push_back(getConstant(AccumulatedConstant)); 2714 for (auto &MulOp : MulOpLists) { 2715 if (MulOp.first == 1) { 2716 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)); 2717 } else if (MulOp.first != 0) { 2718 Ops.push_back(getMulExpr( 2719 getConstant(MulOp.first), 2720 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1), 2721 SCEV::FlagAnyWrap, Depth + 1)); 2722 } 2723 } 2724 if (Ops.empty()) 2725 return getZero(Ty); 2726 if (Ops.size() == 1) 2727 return Ops[0]; 2728 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2729 } 2730 } 2731 2732 // If we are adding something to a multiply expression, make sure the 2733 // something is not already an operand of the multiply. If so, merge it into 2734 // the multiply. 2735 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 2736 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 2737 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 2738 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 2739 if (isa<SCEVConstant>(MulOpSCEV)) 2740 continue; 2741 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 2742 if (MulOpSCEV == Ops[AddOp]) { 2743 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 2744 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 2745 if (Mul->getNumOperands() != 2) { 2746 // If the multiply has more than two operands, we must get the 2747 // Y*Z term. 2748 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2749 Mul->op_begin()+MulOp); 2750 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2751 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2752 } 2753 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul}; 2754 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2755 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV, 2756 SCEV::FlagAnyWrap, Depth + 1); 2757 if (Ops.size() == 2) return OuterMul; 2758 if (AddOp < Idx) { 2759 Ops.erase(Ops.begin()+AddOp); 2760 Ops.erase(Ops.begin()+Idx-1); 2761 } else { 2762 Ops.erase(Ops.begin()+Idx); 2763 Ops.erase(Ops.begin()+AddOp-1); 2764 } 2765 Ops.push_back(OuterMul); 2766 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2767 } 2768 2769 // Check this multiply against other multiplies being added together. 2770 for (unsigned OtherMulIdx = Idx+1; 2771 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 2772 ++OtherMulIdx) { 2773 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 2774 // If MulOp occurs in OtherMul, we can fold the two multiplies 2775 // together. 
2776 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 2777 OMulOp != e; ++OMulOp) 2778 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 2779 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 2780 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 2781 if (Mul->getNumOperands() != 2) { 2782 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 2783 Mul->op_begin()+MulOp); 2784 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 2785 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2786 } 2787 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 2788 if (OtherMul->getNumOperands() != 2) { 2789 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 2790 OtherMul->op_begin()+OMulOp); 2791 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 2792 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1); 2793 } 2794 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2}; 2795 const SCEV *InnerMulSum = 2796 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2797 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum, 2798 SCEV::FlagAnyWrap, Depth + 1); 2799 if (Ops.size() == 2) return OuterMul; 2800 Ops.erase(Ops.begin()+Idx); 2801 Ops.erase(Ops.begin()+OtherMulIdx-1); 2802 Ops.push_back(OuterMul); 2803 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2804 } 2805 } 2806 } 2807 } 2808 2809 // If there are any add recurrences in the operands list, see if any other 2810 // added values are loop invariant. If so, we can fold them into the 2811 // recurrence. 2812 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 2813 ++Idx; 2814 2815 // Scan over all recurrences, trying to fold loop invariants into them. 2816 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 2817 // Scan all of the other operands to this add and add them to the vector if 2818 // they are loop invariant w.r.t. the recurrence. 2819 SmallVector<const SCEV *, 8> LIOps; 2820 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 2821 const Loop *AddRecLoop = AddRec->getLoop(); 2822 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2823 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) { 2824 LIOps.push_back(Ops[i]); 2825 Ops.erase(Ops.begin()+i); 2826 --i; --e; 2827 } 2828 2829 // If we found some loop invariants, fold them into the recurrence. 2830 if (!LIOps.empty()) { 2831 // Compute nowrap flags for the addition of the loop-invariant ops and 2832 // the addrec. Temporarily push it as an operand for that purpose. These 2833 // flags are valid in the scope of the addrec only. 2834 LIOps.push_back(AddRec); 2835 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps); 2836 LIOps.pop_back(); 2837 2838 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 2839 LIOps.push_back(AddRec->getStart()); 2840 2841 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2842 2843 // It is not in general safe to propagate flags valid on an add within 2844 // the addrec scope to one outside it. We must prove that the inner 2845 // scope is guaranteed to execute if the outer one does to be able to 2846 // safely propagate. We know the program is undefined if poison is 2847 // produced on the inner scoped addrec. We also know that *for this use* 2848 // the outer scoped add can't overflow (because of the flags we just 2849 // computed for the inner scoped add) without the program being undefined. 
2850 // Proving that entry to the outer scope necessitates entry to the inner 2851 // scope thus proves the program undefined if the flags would be violated 2852 // in the outer scope. 2853 SCEV::NoWrapFlags AddFlags = Flags; 2854 if (AddFlags != SCEV::FlagAnyWrap) { 2855 auto *DefI = getDefiningScopeBound(LIOps); 2856 auto *ReachI = &*AddRecLoop->getHeader()->begin(); 2857 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI)) 2858 AddFlags = SCEV::FlagAnyWrap; 2859 } 2860 AddRecOps[0] = getAddExpr(LIOps, AddFlags, Depth + 1); 2861 2862 // Build the new addrec. Propagate the NUW and NSW flags if both the 2863 // outer add and the inner addrec are guaranteed to have no overflow. 2864 // Always propagate NW. 2865 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW)); 2866 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags); 2867 2868 // If all of the other operands were loop invariant, we are done. 2869 if (Ops.size() == 1) return NewRec; 2870 2871 // Otherwise, add the folded AddRec by the non-invariant parts. 2872 for (unsigned i = 0;; ++i) 2873 if (Ops[i] == AddRec) { 2874 Ops[i] = NewRec; 2875 break; 2876 } 2877 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2878 } 2879 2880 // Okay, if there weren't any loop invariants to be folded, check to see if 2881 // there are multiple AddRecs with the same loop induction variable being 2882 // added together. If so, we can fold them. 2883 for (unsigned OtherIdx = Idx+1; 2884 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2885 ++OtherIdx) { 2886 // We expect the AddRecExprs to be sorted in reverse dominance order, 2887 // so that the 1st found AddRecExpr is dominated by all others. 2888 assert(DT.dominates( 2889 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(), 2890 AddRec->getLoop()->getHeader()) && 2891 "AddRecExprs are not sorted in reverse dominance order?"); 2892 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 2893 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 2894 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands()); 2895 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 2896 ++OtherIdx) { 2897 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 2898 if (OtherAddRec->getLoop() == AddRecLoop) { 2899 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 2900 i != e; ++i) { 2901 if (i >= AddRecOps.size()) { 2902 AddRecOps.append(OtherAddRec->op_begin()+i, 2903 OtherAddRec->op_end()); 2904 break; 2905 } 2906 SmallVector<const SCEV *, 2> TwoOps = { 2907 AddRecOps[i], OtherAddRec->getOperand(i)}; 2908 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); 2909 } 2910 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 2911 } 2912 } 2913 // Step size has changed, so we cannot guarantee no self-wraparound. 2914 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); 2915 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); 2916 } 2917 } 2918 2919 // Otherwise couldn't fold anything into this recurrence. Move onto the 2920 // next one. 2921 } 2922 2923 // Okay, it looks like we really DO need an add expr. Check to see if we 2924 // already have one, otherwise create a new one.
2925 return getOrCreateAddExpr(Ops, ComputeFlags(Ops)); 2926 } 2927 2928 const SCEV * 2929 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops, 2930 SCEV::NoWrapFlags Flags) { 2931 FoldingSetNodeID ID; 2932 ID.AddInteger(scAddExpr); 2933 for (const SCEV *Op : Ops) 2934 ID.AddPointer(Op); 2935 void *IP = nullptr; 2936 SCEVAddExpr *S = 2937 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2938 if (!S) { 2939 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2940 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2941 S = new (SCEVAllocator) 2942 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size()); 2943 UniqueSCEVs.InsertNode(S, IP); 2944 registerUser(S, Ops); 2945 } 2946 S->setNoWrapFlags(Flags); 2947 return S; 2948 } 2949 2950 const SCEV * 2951 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops, 2952 const Loop *L, SCEV::NoWrapFlags Flags) { 2953 FoldingSetNodeID ID; 2954 ID.AddInteger(scAddRecExpr); 2955 for (const SCEV *Op : Ops) 2956 ID.AddPointer(Op); 2957 ID.AddPointer(L); 2958 void *IP = nullptr; 2959 SCEVAddRecExpr *S = 2960 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2961 if (!S) { 2962 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2963 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2964 S = new (SCEVAllocator) 2965 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L); 2966 UniqueSCEVs.InsertNode(S, IP); 2967 LoopUsers[L].push_back(S); 2968 registerUser(S, Ops); 2969 } 2970 setNoWrapFlags(S, Flags); 2971 return S; 2972 } 2973 2974 const SCEV * 2975 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops, 2976 SCEV::NoWrapFlags Flags) { 2977 FoldingSetNodeID ID; 2978 ID.AddInteger(scMulExpr); 2979 for (const SCEV *Op : Ops) 2980 ID.AddPointer(Op); 2981 void *IP = nullptr; 2982 SCEVMulExpr *S = 2983 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2984 if (!S) { 2985 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2986 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2987 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 2988 O, Ops.size()); 2989 UniqueSCEVs.InsertNode(S, IP); 2990 registerUser(S, Ops); 2991 } 2992 S->setNoWrapFlags(Flags); 2993 return S; 2994 } 2995 2996 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) { 2997 uint64_t k = i*j; 2998 if (j > 1 && k / j != i) Overflow = true; 2999 return k; 3000 } 3001 3002 /// Compute the result of "n choose k", the binomial coefficient. If an 3003 /// intermediate computation overflows, Overflow will be set and the return will 3004 /// be garbage. Overflow is not cleared on absence of overflow. 3005 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) { 3006 // We use the multiplicative formula: 3007 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 . 3008 // At each iteration i, we take the i-th term of the numerator, n-(i-1), and 3009 // divide the running product by i. This division will always produce an 3010 // integral result, and helps reduce the chance of overflow in the 3011 // intermediate computations. However, we can still overflow even when the 3012 // final result would fit.
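// (Illustrative trace, not from the original source: Choose(6, 2) computes
// r = (1*6)/1 = 6, then r = (6*5)/2 = 15 = C(6,2). Each division is exact
// because the product of any i consecutive integers is divisible by i!,
// which is what keeps the intermediate values small.)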
3013
3014   if (k > n) return 0;
3015   if (n == 0 || n == k) return 1;
3016
3017   if (k > n/2)
3018     k = n-k;
3019
3020   uint64_t r = 1;
3021   for (uint64_t i = 1; i <= k; ++i) {
3022     r = umul_ov(r, n-(i-1), Overflow);
3023     r /= i;
3024   }
3025   return r;
3026 }
3027
3028 /// Determine if any of the operands in this SCEV are a constant or if
3029 /// any of the add or multiply expressions in this SCEV contain a constant.
3030 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
3031   struct FindConstantInAddMulChain {
3032     bool FoundConstant = false;
3033
3034     bool follow(const SCEV *S) {
3035       FoundConstant |= isa<SCEVConstant>(S);
3036       return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3037     }
3038
3039     bool isDone() const {
3040       return FoundConstant;
3041     }
3042   };
3043
3044   FindConstantInAddMulChain F;
3045   SCEVTraversal<FindConstantInAddMulChain> ST(F);
3046   ST.visitAll(StartExpr);
3047   return F.FoundConstant;
3048 }
3049
3050 /// Get a canonical multiply expression, or something simpler if possible.
3051 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
3052                                         SCEV::NoWrapFlags OrigFlags,
3053                                         unsigned Depth) {
3054   assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
3055          "only nuw or nsw allowed");
3056   assert(!Ops.empty() && "Cannot get empty mul!");
3057   if (Ops.size() == 1) return Ops[0];
3058 #ifndef NDEBUG
3059   Type *ETy = Ops[0]->getType();
3060   assert(!ETy->isPointerTy());
3061   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
3062     assert(Ops[i]->getType() == ETy &&
3063            "SCEVMulExpr operand types don't match!");
3064 #endif
3065
3066   // Sort by complexity; this groups all similar expression types together.
3067   GroupByComplexity(Ops, &LI, DT);
3068
3069   // If there are any constants, fold them together.
3070   unsigned Idx = 0;
3071   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3072     ++Idx;
3073     assert(Idx < Ops.size());
3074     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3075       // We found two constants, fold them together!
3076       Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3077       if (Ops.size() == 2) return Ops[0];
3078       Ops.erase(Ops.begin()+1); // Erase the folded element
3079       LHSC = cast<SCEVConstant>(Ops[0]);
3080     }
3081
3082     // If we have a multiply of zero, it will always be zero.
3083     if (LHSC->getValue()->isZero())
3084       return LHSC;
3085
3086     // If we are left with a constant one being multiplied, strip it off.
3087     if (LHSC->getValue()->isOne()) {
3088       Ops.erase(Ops.begin());
3089       --Idx;
3090     }
3091
3092     if (Ops.size() == 1)
3093       return Ops[0];
3094   }
3095
3096   // Delay expensive flag strengthening until necessary.
3097   auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3098     return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3099   };
3100
3101   // Limit recursion depth.
3102   if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3103     return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3104
3105   if (SCEV *S = findExistingSCEVInCache(scMulExpr, Ops)) {
3106     // Don't strengthen flags if we have no new information.
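    // (Strengthening only happens when the caller supplies no-wrap bits that
    // the cached node does not already carry.)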
3107     SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3108     if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3109       Mul->setNoWrapFlags(ComputeFlags(Ops));
3110     return S;
3111   }
3112
3113   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3114     if (Ops.size() == 2) {
3115       // C1*(C2+V) -> C1*C2 + C1*V
3116       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3117         // If any of Add's ops are Adds or Muls with a constant, apply this
3118         // transformation as well.
3119         //
3120         // TODO: There are some cases where this transformation is not
3121         // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3122         // this transformation should be narrowed down.
3123         if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add)) {
3124           const SCEV *LHS = getMulExpr(LHSC, Add->getOperand(0),
3125                                        SCEV::FlagAnyWrap, Depth + 1);
3126           const SCEV *RHS = getMulExpr(LHSC, Add->getOperand(1),
3127                                        SCEV::FlagAnyWrap, Depth + 1);
3128           return getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
3129         }
3130
3131       if (Ops[0]->isAllOnesValue()) {
3132         // If we have a mul by -1 of an add, try distributing the -1 among the
3133         // add operands.
3134         if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3135           SmallVector<const SCEV *, 4> NewOps;
3136           bool AnyFolded = false;
3137           for (const SCEV *AddOp : Add->operands()) {
3138             const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3139                                          Depth + 1);
3140             if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3141             NewOps.push_back(Mul);
3142           }
3143           if (AnyFolded)
3144             return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3145         } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3146           // Negation preserves a recurrence's no self-wrap property.
3147           SmallVector<const SCEV *, 4> Operands;
3148           for (const SCEV *AddRecOp : AddRec->operands())
3149             Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3150                                           Depth + 1));
3151
3152           return getAddRecExpr(Operands, AddRec->getLoop(),
3153                                AddRec->getNoWrapFlags(SCEV::FlagNW));
3154         }
3155       }
3156     }
3157   }
3158
3159   // Skip over the add expressions until we get to a multiply.
3160   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3161     ++Idx;
3162
3163   // If there are mul operands, inline them all into this expression.
3164   if (Idx < Ops.size()) {
3165     bool DeletedMul = false;
3166     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3167       if (Ops.size() > MulOpsInlineThreshold)
3168         break;
3169       // If we have a mul, expand the mul operands onto the end of the
3170       // operands list.
3171       Ops.erase(Ops.begin()+Idx);
3172       Ops.append(Mul->op_begin(), Mul->op_end());
3173       DeletedMul = true;
3174     }
3175
3176     // If we deleted at least one mul, we added operands to the end of the
3177     // list, and they are not necessarily sorted. Recurse to re-sort and
3178     // re-simplify any operands we just acquired.
3179     if (DeletedMul)
3180       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3181   }
3182
3183   // If there are any add recurrences in the operands list, see if any other
3184   // multiplied values are loop invariant. If so, we can fold them into the
3185   // recurrence.
3186   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3187     ++Idx;
3188
3189   // Scan over all recurrences, trying to fold loop invariants into them.
3190   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3191     // Scan all of the other operands to this mul and add them to the vector
3192     // if they are loop invariant w.r.t. the recurrence.
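    // For example (illustrative): in (8 * %inv * {0,+,2}<L>), with %inv
    // defined outside L, both 8 and %inv are loop invariant and end up in
    // LIOps below.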
3193     SmallVector<const SCEV *, 8> LIOps;
3194     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3195     const Loop *AddRecLoop = AddRec->getLoop();
3196     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3197       if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3198         LIOps.push_back(Ops[i]);
3199         Ops.erase(Ops.begin()+i);
3200         --i; --e;
3201       }
3202
3203     // If we found some loop invariants, fold them into the recurrence.
3204     if (!LIOps.empty()) {
3205       // NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
3206       SmallVector<const SCEV *, 4> NewOps;
3207       NewOps.reserve(AddRec->getNumOperands());
3208       const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3209       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3210         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3211                                     SCEV::FlagAnyWrap, Depth + 1));
3212
3213       // Build the new addrec. Propagate the NUW and NSW flags if both the
3214       // outer mul and the inner addrec are guaranteed to have no overflow.
3215       //
3216       // The no-self-wrap property cannot be guaranteed after changing the
3217       // step size, but it will be inferred if either NUW or NSW is true.
3218       SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
3219       const SCEV *NewRec = getAddRecExpr(
3220           NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
3221
3222       // If all of the other operands were loop invariant, we are done.
3223       if (Ops.size() == 1) return NewRec;
3224
3225       // Otherwise, multiply the folded AddRec by the non-invariant parts.
3226       for (unsigned i = 0;; ++i)
3227         if (Ops[i] == AddRec) {
3228           Ops[i] = NewRec;
3229           break;
3230         }
3231       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3232     }
3233
3234     // Okay, if there weren't any loop invariants to be folded, check to see
3235     // if there are multiple AddRecs with the same loop induction variable
3236     // being multiplied together. If so, we can fold them.
3237
3238     // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3239     // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3240     //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3241     //    ]]],+,...up to x=2n}.
3242     // Note that the arguments to choose() are always integers with values
3243     // known at compile time, never SCEV objects.
3244     //
3245     // The implementation avoids pointless extra computations when the two
3246     // addrecs are of different length (mathematically, it's equivalent to
3247     // an infinite stream of zeros on the right).
3248     bool OpsModified = false;
3249     for (unsigned OtherIdx = Idx+1;
3250          OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3251          ++OtherIdx) {
3252       const SCEVAddRecExpr *OtherAddRec =
3253           dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3254       if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3255         continue;
3256
3257       // Limit max number of arguments to avoid creation of unreasonably big
3258       // SCEVAddRecs with very complex operands.
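      // (For a small illustration of the product rule above: {1,+,1}<L> *
      // {1,+,1}<L>, i.e. (n+1)^2, folds to {1,+,3,+,2}<L> -- values
      // 1, 4, 9, 16, ... with first differences 3, 5, 7, ...)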
3259       if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3260           MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3261         continue;
3262
3263       bool Overflow = false;
3264       Type *Ty = AddRec->getType();
3265       bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3266       SmallVector<const SCEV *, 7> AddRecOps;
3267       for (int x = 0, xe = AddRec->getNumOperands() +
3268              OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3269         SmallVector<const SCEV *, 7> SumOps;
3270         for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3271           uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3272           for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3273                  ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3274                z < ze && !Overflow; ++z) {
3275             uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3276             uint64_t Coeff;
3277             if (LargerThan64Bits)
3278               Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3279             else
3280               Coeff = Coeff1*Coeff2;
3281             const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3282             const SCEV *Term1 = AddRec->getOperand(y-z);
3283             const SCEV *Term2 = OtherAddRec->getOperand(z);
3284             SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3285                                         SCEV::FlagAnyWrap, Depth + 1));
3286           }
3287         }
3288         if (SumOps.empty())
3289           SumOps.push_back(getZero(Ty));
3290         AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3291       }
3292       if (!Overflow) {
3293         const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3294                                               SCEV::FlagAnyWrap);
3295         if (Ops.size() == 2) return NewAddRec;
3296         Ops[Idx] = NewAddRec;
3297         Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3298         OpsModified = true;
3299         AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3300         if (!AddRec)
3301           break;
3302       }
3303     }
3304     if (OpsModified)
3305       return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3306
3307     // Otherwise couldn't fold anything into this recurrence. Move on to the
3308     // next one.
3309   }
3310
3311   // Okay, it looks like we really DO need a mul expr. Check to see if we
3312   // already have one, otherwise create a new one.
3313   return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3314 }
3315
3316 /// Return an unsigned remainder expression based on unsigned division.
3317 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3318                                          const SCEV *RHS) {
3319   assert(getEffectiveSCEVType(LHS->getType()) ==
3320          getEffectiveSCEVType(RHS->getType()) &&
3321          "SCEVURemExpr operand types don't match!");
3322
3323   // Short-circuit easy cases.
3324   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3325     // If the constant is one, the result is trivial.
3326     if (RHSC->getValue()->isOne())
3327       return getZero(LHS->getType()); // X urem 1 --> 0
3328
3329     // If the constant is a power of two, fold into a zext(trunc(LHS)).
3330     if (RHSC->getAPInt().isPowerOf2()) {
3331       Type *FullTy = LHS->getType();
3332       Type *TruncTy =
3333           IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3334       return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
3335     }
3336   }
3337
3338   // Fall back to computing %x urem %y as %x -<nuw> ((%x udiv %y) *<nuw> %y).
3339   const SCEV *UDiv = getUDivExpr(LHS, RHS);
3340   const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3341   return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
3342 }
3343
3344 /// Get a canonical unsigned division expression, or something simpler if
3345 /// possible.
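/// For example (illustrative), %x /u 1 folds to %x and 0 /u %y folds to 0;
/// the more involved folds below require no-wrap reasoning in a wider type.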
3346 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3347                                          const SCEV *RHS) {
3348   assert(!LHS->getType()->isPointerTy() &&
3349          "SCEVUDivExpr operand can't be pointer!");
3350   assert(LHS->getType() == RHS->getType() &&
3351          "SCEVUDivExpr operand types don't match!");
3352
3353   FoldingSetNodeID ID;
3354   ID.AddInteger(scUDivExpr);
3355   ID.AddPointer(LHS);
3356   ID.AddPointer(RHS);
3357   void *IP = nullptr;
3358   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3359     return S;
3360
3361   // 0 udiv Y == 0
3362   if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3363     if (LHSC->getValue()->isZero())
3364       return LHS;
3365
3366   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3367     if (RHSC->getValue()->isOne())
3368       return LHS; // X udiv 1 --> x
3369     // If the denominator is zero, the result of the udiv is undefined. Don't
3370     // try to analyze it, because the resolution chosen here may differ from
3371     // the resolution chosen in other parts of the compiler.
3372     if (!RHSC->getValue()->isZero()) {
3373       // Determine if the division can be folded into the operands
3374       // of LHS.
3375       // TODO: Generalize this to non-constants by using known-bits information.
3376       Type *Ty = LHS->getType();
3377       unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3378       unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3379       // For non-power-of-two values, effectively round the value up to the
3380       // nearest power of two.
3381       if (!RHSC->getAPInt().isPowerOf2())
3382         ++MaxShiftAmt;
3383       IntegerType *ExtTy =
3384           IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3385       if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3386         if (const SCEVConstant *Step =
3387                 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3388           // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3389           const APInt &StepInt = Step->getAPInt();
3390           const APInt &DivInt = RHSC->getAPInt();
3391           if (!StepInt.urem(DivInt) &&
3392               getZeroExtendExpr(AR, ExtTy) ==
3393                   getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3394                                 getZeroExtendExpr(Step, ExtTy),
3395                                 AR->getLoop(), SCEV::FlagAnyWrap)) {
3396             SmallVector<const SCEV *, 4> Operands;
3397             for (const SCEV *Op : AR->operands())
3398               Operands.push_back(getUDivExpr(Op, RHS));
3399             return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3400           }
3401           // Get a canonical UDivExpr for a recurrence:
3402           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3403           // We can currently only fold X%N if X is constant.
3404           const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3405           if (StartC && !DivInt.urem(StepInt) &&
3406               getZeroExtendExpr(AR, ExtTy) ==
3407                   getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3408                                 getZeroExtendExpr(Step, ExtTy),
3409                                 AR->getLoop(), SCEV::FlagAnyWrap)) {
3410             const APInt &StartInt = StartC->getAPInt();
3411             const APInt &StartRem = StartInt.urem(StepInt);
3412             if (StartRem != 0) {
3413               const SCEV *NewLHS =
3414                   getAddRecExpr(getConstant(StartInt - StartRem), Step,
3415                                 AR->getLoop(), SCEV::FlagNW);
3416               if (LHS != NewLHS) {
3417                 LHS = NewLHS;
3418
3419                 // Reset the ID to include the new LHS, and check if it is
3420                 // already cached.
3421                 ID.clear();
3422                 ID.AddInteger(scUDivExpr);
3423                 ID.AddPointer(LHS);
3424                 ID.AddPointer(RHS);
3425                 IP = nullptr;
3426                 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3427                   return S;
3428               }
3429             }
3430           }
3431         }
3432       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
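      // For example (illustrative): (%x * 8) /u 4 can fold to %x * 2 when the
      // zero-extended comparison below proves the multiply cannot wrap.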
3433 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 3434 SmallVector<const SCEV *, 4> Operands; 3435 for (const SCEV *Op : M->operands()) 3436 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3437 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 3438 // Find an operand that's safely divisible. 3439 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 3440 const SCEV *Op = M->getOperand(i); 3441 const SCEV *Div = getUDivExpr(Op, RHSC); 3442 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 3443 Operands = SmallVector<const SCEV *, 4>(M->operands()); 3444 Operands[i] = Div; 3445 return getMulExpr(Operands); 3446 } 3447 } 3448 } 3449 3450 // (A/B)/C --> A/(B*C) if safe and B*C can be folded. 3451 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) { 3452 if (auto *DivisorConstant = 3453 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) { 3454 bool Overflow = false; 3455 APInt NewRHS = 3456 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow); 3457 if (Overflow) { 3458 return getConstant(RHSC->getType(), 0, false); 3459 } 3460 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS)); 3461 } 3462 } 3463 3464 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 3465 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { 3466 SmallVector<const SCEV *, 4> Operands; 3467 for (const SCEV *Op : A->operands()) 3468 Operands.push_back(getZeroExtendExpr(Op, ExtTy)); 3469 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 3470 Operands.clear(); 3471 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 3472 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 3473 if (isa<SCEVUDivExpr>(Op) || 3474 getMulExpr(Op, RHS) != A->getOperand(i)) 3475 break; 3476 Operands.push_back(Op); 3477 } 3478 if (Operands.size() == A->getNumOperands()) 3479 return getAddExpr(Operands); 3480 } 3481 } 3482 3483 // Fold if both operands are constant. 3484 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 3485 Constant *LHSCV = LHSC->getValue(); 3486 Constant *RHSCV = RHSC->getValue(); 3487 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 3488 RHSCV))); 3489 } 3490 } 3491 } 3492 3493 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs 3494 // changes). Make sure we get a new one. 3495 IP = nullptr; 3496 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 3497 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 3498 LHS, RHS); 3499 UniqueSCEVs.InsertNode(S, IP); 3500 registerUser(S, {LHS, RHS}); 3501 return S; 3502 } 3503 3504 APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) { 3505 APInt A = C1->getAPInt().abs(); 3506 APInt B = C2->getAPInt().abs(); 3507 uint32_t ABW = A.getBitWidth(); 3508 uint32_t BBW = B.getBitWidth(); 3509 3510 if (ABW > BBW) 3511 B = B.zext(ABW); 3512 else if (ABW < BBW) 3513 A = A.zext(BBW); 3514 3515 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B)); 3516 } 3517 3518 /// Get a canonical unsigned division expression, or something simpler if 3519 /// possible. There is no representation for an exact udiv in SCEV IR, but we 3520 /// can attempt to remove factors from the LHS and RHS. We can't do this when 3521 /// it's not exact because the udiv may be clearing bits. 3522 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS, 3523 const SCEV *RHS) { 3524 // TODO: we could try to find factors in all sorts of things, but for now we 3525 // just deal with u/exact (multiply, constant). 
See SCEVDivision towards the
3526 // end of this file for inspiration.
3527
3528   const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3529   if (!Mul || !Mul->hasNoUnsignedWrap())
3530     return getUDivExpr(LHS, RHS);
3531
3532   if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3533     // If the mulexpr multiplies by a constant, then that constant must be the
3534     // first element of the mulexpr.
3535     if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3536       if (LHSCst == RHSCst) {
3537         SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3538         return getMulExpr(Operands);
3539       }
3540
3541       // We can't just assume that LHSCst divides RHSCst cleanly; it could be
3542       // that there's a factor provided by one of the other terms. We need to
3543       // check.
3544       APInt Factor = gcd(LHSCst, RHSCst);
3545       if (!Factor.isIntN(1)) {
3546         LHSCst =
3547             cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3548         RHSCst =
3549             cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3550         SmallVector<const SCEV *, 2> Operands;
3551         Operands.push_back(LHSCst);
3552         Operands.append(Mul->op_begin() + 1, Mul->op_end());
3553         LHS = getMulExpr(Operands);
3554         RHS = RHSCst;
3555         Mul = dyn_cast<SCEVMulExpr>(LHS);
3556         if (!Mul)
3557           return getUDivExactExpr(LHS, RHS);
3558       }
3559     }
3560   }
3561
3562   for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3563     if (Mul->getOperand(i) == RHS) {
3564       SmallVector<const SCEV *, 2> Operands;
3565       Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3566       Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3567       return getMulExpr(Operands);
3568     }
3569   }
3570
3571   return getUDivExpr(LHS, RHS);
3572 }
3573
3574 /// Get an add recurrence expression for the specified loop. Simplify the
3575 /// expression as much as possible.
3576 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3577                                            const Loop *L,
3578                                            SCEV::NoWrapFlags Flags) {
3579   SmallVector<const SCEV *, 4> Operands;
3580   Operands.push_back(Start);
3581   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3582     if (StepChrec->getLoop() == L) {
3583       Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3584       return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3585     }
3586
3587   Operands.push_back(Step);
3588   return getAddRecExpr(Operands, L, Flags);
3589 }
3590
3591 /// Get an add recurrence expression for the specified loop. Simplify the
3592 /// expression as much as possible.
3593 const SCEV *
3594 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3595                                const Loop *L, SCEV::NoWrapFlags Flags) {
3596   if (Operands.size() == 1) return Operands[0];
3597 #ifndef NDEBUG
3598   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3599   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3600     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3601            "SCEVAddRecExpr operand types don't match!");
3602     assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3603   }
3604   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3605     assert(isLoopInvariant(Operands[i], L) &&
3606            "SCEVAddRecExpr operand is not loop-invariant!");
3607 #endif
3608
3609   if (Operands.back()->isZero()) {
3610     Operands.pop_back();
3611     return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3612   }
3613
3614   // It's tempting to want to call getConstantMaxBackedgeTakenCount here and
3615   // use that information to infer NUW and NSW flags.
However, computing a
3616   // BE count requires calling getAddRecExpr, so we may not yet have a
3617   // meaningful BE count at this point (and if we don't, we'd be stuck
3618   // with a SCEVCouldNotCompute as the cached BE count).
3619
3620   Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3621
3622   // Canonicalize nested AddRecs by nesting them in order of loop depth.
3623   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3624     const Loop *NestedLoop = NestedAR->getLoop();
3625     if (L->contains(NestedLoop)
3626             ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3627             : (!NestedLoop->contains(L) &&
3628                DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3629       SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3630       Operands[0] = NestedAR->getStart();
3631       // AddRecs require their operands be loop-invariant with respect to their
3632       // loops. Don't perform this transformation if it would break this
3633       // requirement.
3634       bool AllInvariant = all_of(
3635           Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3636
3637       if (AllInvariant) {
3638         // Create a recurrence for the outer loop with the same step size.
3639         //
3640         // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3641         // inner recurrence has the same property.
3642         SCEV::NoWrapFlags OuterFlags =
3643             maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3644
3645         NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3646         AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3647           return isLoopInvariant(Op, NestedLoop);
3648         });
3649
3650         if (AllInvariant) {
3651           // Ok, both add recurrences are valid after the transformation.
3652           //
3653           // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3654           // the outer recurrence has the same property.
3655           SCEV::NoWrapFlags InnerFlags =
3656               maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3657           return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3658         }
3659       }
3660       // Reset Operands to its original state.
3661       Operands[0] = NestedAR;
3662     }
3663   }
3664
3665   // Okay, it looks like we really DO need an addrec expr. Check to see if we
3666   // already have one, otherwise create a new one.
3667   return getOrCreateAddRecExpr(Operands, L, Flags);
3668 }
3669
3670 const SCEV *
3671 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3672                             const SmallVectorImpl<const SCEV *> &IndexExprs) {
3673   const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3674   // getSCEV(Base)->getType() has the same address space as Base->getType()
3675   // because SCEV::getType() preserves the address space.
3676   Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3677   const bool AssumeInBoundsFlags = [&]() {
3678     if (!GEP->isInBounds())
3679       return false;
3680
3681     // We'd like to propagate flags from the IR to the corresponding SCEV nodes,
3682     // but to do that, we have to ensure that said flag is valid in the entire
3683     // defined scope of the SCEV.
3684     auto *GEPI = dyn_cast<Instruction>(GEP);
3685     // TODO: non-instructions have global scope. We might be able to prove
3686     // some global scope cases.
3687     return GEPI && isSCEVExprNeverPoison(GEPI);
3688   }();
3689
3690   SCEV::NoWrapFlags OffsetWrap =
3691       AssumeInBoundsFlags ?
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3692 3693 Type *CurTy = GEP->getType(); 3694 bool FirstIter = true; 3695 SmallVector<const SCEV *, 4> Offsets; 3696 for (const SCEV *IndexExpr : IndexExprs) { 3697 // Compute the (potentially symbolic) offset in bytes for this index. 3698 if (StructType *STy = dyn_cast<StructType>(CurTy)) { 3699 // For a struct, add the member offset. 3700 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); 3701 unsigned FieldNo = Index->getZExtValue(); 3702 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo); 3703 Offsets.push_back(FieldOffset); 3704 3705 // Update CurTy to the type of the field at Index. 3706 CurTy = STy->getTypeAtIndex(Index); 3707 } else { 3708 // Update CurTy to its element type. 3709 if (FirstIter) { 3710 assert(isa<PointerType>(CurTy) && 3711 "The first index of a GEP indexes a pointer"); 3712 CurTy = GEP->getSourceElementType(); 3713 FirstIter = false; 3714 } else { 3715 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0); 3716 } 3717 // For an array, add the element offset, explicitly scaled. 3718 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy); 3719 // Getelementptr indices are signed. 3720 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy); 3721 3722 // Multiply the index by the element size to compute the element offset. 3723 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap); 3724 Offsets.push_back(LocalOffset); 3725 } 3726 } 3727 3728 // Handle degenerate case of GEP without offsets. 3729 if (Offsets.empty()) 3730 return BaseExpr; 3731 3732 // Add the offsets together, assuming nsw if inbounds. 3733 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap); 3734 // Add the base address and the offset. We cannot use the nsw flag, as the 3735 // base address is unsigned. However, if we know that the offset is 3736 // non-negative, we can use nuw. 3737 SCEV::NoWrapFlags BaseWrap = AssumeInBoundsFlags && isKnownNonNegative(Offset) 3738 ? SCEV::FlagNUW : SCEV::FlagAnyWrap; 3739 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap); 3740 assert(BaseExpr->getType() == GEPExpr->getType() && 3741 "GEP should not change type mid-flight."); 3742 return GEPExpr; 3743 } 3744 3745 SCEV *ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType, 3746 ArrayRef<const SCEV *> Ops) { 3747 FoldingSetNodeID ID; 3748 ID.AddInteger(SCEVType); 3749 for (const SCEV *Op : Ops) 3750 ID.AddPointer(Op); 3751 void *IP = nullptr; 3752 return UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3753 } 3754 3755 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) { 3756 SCEV::NoWrapFlags Flags = IsNSW ? 
SCEV::FlagNSW : SCEV::FlagAnyWrap; 3757 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags)); 3758 } 3759 3760 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind, 3761 SmallVectorImpl<const SCEV *> &Ops) { 3762 assert(SCEVMinMaxExpr::isMinMaxType(Kind) && "Not a SCEVMinMaxExpr!"); 3763 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 3764 if (Ops.size() == 1) return Ops[0]; 3765 #ifndef NDEBUG 3766 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 3767 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 3768 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 3769 "Operand types don't match!"); 3770 assert(Ops[0]->getType()->isPointerTy() == 3771 Ops[i]->getType()->isPointerTy() && 3772 "min/max should be consistently pointerish"); 3773 } 3774 #endif 3775 3776 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr; 3777 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr; 3778 3779 // Sort by complexity, this groups all similar expression types together. 3780 GroupByComplexity(Ops, &LI, DT); 3781 3782 // Check if we have created the same expression before. 3783 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) { 3784 return S; 3785 } 3786 3787 // If there are any constants, fold them together. 3788 unsigned Idx = 0; 3789 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 3790 ++Idx; 3791 assert(Idx < Ops.size()); 3792 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) { 3793 if (Kind == scSMaxExpr) 3794 return APIntOps::smax(LHS, RHS); 3795 else if (Kind == scSMinExpr) 3796 return APIntOps::smin(LHS, RHS); 3797 else if (Kind == scUMaxExpr) 3798 return APIntOps::umax(LHS, RHS); 3799 else if (Kind == scUMinExpr) 3800 return APIntOps::umin(LHS, RHS); 3801 llvm_unreachable("Unknown SCEV min/max opcode"); 3802 }; 3803 3804 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 3805 // We found two constants, fold them together! 3806 ConstantInt *Fold = ConstantInt::get( 3807 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt())); 3808 Ops[0] = getConstant(Fold); 3809 Ops.erase(Ops.begin()+1); // Erase the folded element 3810 if (Ops.size() == 1) return Ops[0]; 3811 LHSC = cast<SCEVConstant>(Ops[0]); 3812 } 3813 3814 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned); 3815 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned); 3816 3817 if (IsMax ? IsMinV : IsMaxV) { 3818 // If we are left with a constant minimum(/maximum)-int, strip it off. 3819 Ops.erase(Ops.begin()); 3820 --Idx; 3821 } else if (IsMax ? IsMaxV : IsMinV) { 3822 // If we have a max(/min) with a constant maximum(/minimum)-int, 3823 // it will always be the extremum. 3824 return LHSC; 3825 } 3826 3827 if (Ops.size() == 1) return Ops[0]; 3828 } 3829 3830 // Find the first operation of the same kind 3831 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind) 3832 ++Idx; 3833 3834 // Check to see if one of the operands is of the same kind. If so, expand its 3835 // operands onto our operand list, and recurse to simplify. 3836 if (Idx < Ops.size()) { 3837 bool DeletedAny = false; 3838 while (Ops[Idx]->getSCEVType() == Kind) { 3839 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]); 3840 Ops.erase(Ops.begin()+Idx); 3841 Ops.append(SMME->op_begin(), SMME->op_end()); 3842 DeletedAny = true; 3843 } 3844 3845 if (DeletedAny) 3846 return getMinMaxExpr(Kind, Ops); 3847 } 3848 3849 // Okay, check to see if the same value occurs in the operand list twice. If 3850 // so, delete one. Since we sorted the list, these values are required to 3851 // be adjacent. 
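  // For example (illustrative): smax(%x, %y, %y) becomes smax(%x, %y), and
  // smax(%x, %y) becomes %y when %y >= %x can be proven.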
3852 llvm::CmpInst::Predicate GEPred = 3853 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; 3854 llvm::CmpInst::Predicate LEPred = 3855 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; 3856 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred; 3857 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred; 3858 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) { 3859 if (Ops[i] == Ops[i + 1] || 3860 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) { 3861 // X op Y op Y --> X op Y 3862 // X op Y --> X, if we know X, Y are ordered appropriately 3863 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2); 3864 --i; 3865 --e; 3866 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i], 3867 Ops[i + 1])) { 3868 // X op Y --> Y, if we know X, Y are ordered appropriately 3869 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1); 3870 --i; 3871 --e; 3872 } 3873 } 3874 3875 if (Ops.size() == 1) return Ops[0]; 3876 3877 assert(!Ops.empty() && "Reduced smax down to nothing!"); 3878 3879 // Okay, it looks like we really DO need an expr. Check to see if we 3880 // already have one, otherwise create a new one. 3881 FoldingSetNodeID ID; 3882 ID.AddInteger(Kind); 3883 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3884 ID.AddPointer(Ops[i]); 3885 void *IP = nullptr; 3886 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP); 3887 if (ExistingSCEV) 3888 return ExistingSCEV; 3889 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 3890 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 3891 SCEV *S = new (SCEVAllocator) 3892 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size()); 3893 3894 UniqueSCEVs.InsertNode(S, IP); 3895 registerUser(S, Ops); 3896 return S; 3897 } 3898 3899 namespace { 3900 3901 class SCEVSequentialMinMaxDeduplicatingVisitor final 3902 : public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, 3903 Optional<const SCEV *>> { 3904 using RetVal = Optional<const SCEV *>; 3905 using Base = SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor, RetVal>; 3906 3907 ScalarEvolution &SE; 3908 const SCEVTypes RootKind; // Must be a sequential min/max expression. 3909 const SCEVTypes NonSequentialRootKind; // Non-sequential variant of RootKind. 3910 SmallPtrSet<const SCEV *, 16> SeenOps; 3911 3912 bool canRecurseInto(SCEVTypes Kind) const { 3913 // We can only recurse into the SCEV expression of the same effective type 3914 // as the type of our root SCEV expression. 3915 return RootKind == Kind || NonSequentialRootKind == Kind; 3916 }; 3917 3918 RetVal visitAnyMinMaxExpr(const SCEV *S) { 3919 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) && 3920 "Only for min/max expressions."); 3921 SCEVTypes Kind = S->getSCEVType(); 3922 3923 if (!canRecurseInto(Kind)) 3924 return S; 3925 3926 auto *NAry = cast<SCEVNAryExpr>(S); 3927 SmallVector<const SCEV *> NewOps; 3928 bool Changed = 3929 visit(Kind, makeArrayRef(NAry->op_begin(), NAry->op_end()), NewOps); 3930 3931 if (!Changed) 3932 return S; 3933 if (NewOps.empty()) 3934 return None; 3935 3936 return isa<SCEVSequentialMinMaxExpr>(S) 3937 ? SE.getSequentialMinMaxExpr(Kind, NewOps) 3938 : SE.getMinMaxExpr(Kind, NewOps); 3939 } 3940 3941 RetVal visit(const SCEV *S) { 3942 // Has the whole operand been seen already? 
3943 if (!SeenOps.insert(S).second) 3944 return None; 3945 return Base::visit(S); 3946 } 3947 3948 public: 3949 SCEVSequentialMinMaxDeduplicatingVisitor(ScalarEvolution &SE, 3950 SCEVTypes RootKind) 3951 : SE(SE), RootKind(RootKind), 3952 NonSequentialRootKind( 3953 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 3954 RootKind)) {} 3955 3956 bool /*Changed*/ visit(SCEVTypes Kind, ArrayRef<const SCEV *> OrigOps, 3957 SmallVectorImpl<const SCEV *> &NewOps) { 3958 bool Changed = false; 3959 SmallVector<const SCEV *> Ops; 3960 Ops.reserve(OrigOps.size()); 3961 3962 for (const SCEV *Op : OrigOps) { 3963 RetVal NewOp = visit(Op); 3964 if (NewOp != Op) 3965 Changed = true; 3966 if (NewOp) 3967 Ops.emplace_back(*NewOp); 3968 } 3969 3970 if (Changed) 3971 NewOps = std::move(Ops); 3972 return Changed; 3973 } 3974 3975 RetVal visitConstant(const SCEVConstant *Constant) { return Constant; } 3976 3977 RetVal visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) { return Expr; } 3978 3979 RetVal visitTruncateExpr(const SCEVTruncateExpr *Expr) { return Expr; } 3980 3981 RetVal visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) { return Expr; } 3982 3983 RetVal visitSignExtendExpr(const SCEVSignExtendExpr *Expr) { return Expr; } 3984 3985 RetVal visitAddExpr(const SCEVAddExpr *Expr) { return Expr; } 3986 3987 RetVal visitMulExpr(const SCEVMulExpr *Expr) { return Expr; } 3988 3989 RetVal visitUDivExpr(const SCEVUDivExpr *Expr) { return Expr; } 3990 3991 RetVal visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; } 3992 3993 RetVal visitSMaxExpr(const SCEVSMaxExpr *Expr) { 3994 return visitAnyMinMaxExpr(Expr); 3995 } 3996 3997 RetVal visitUMaxExpr(const SCEVUMaxExpr *Expr) { 3998 return visitAnyMinMaxExpr(Expr); 3999 } 4000 4001 RetVal visitSMinExpr(const SCEVSMinExpr *Expr) { 4002 return visitAnyMinMaxExpr(Expr); 4003 } 4004 4005 RetVal visitUMinExpr(const SCEVUMinExpr *Expr) { 4006 return visitAnyMinMaxExpr(Expr); 4007 } 4008 4009 RetVal visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) { 4010 return visitAnyMinMaxExpr(Expr); 4011 } 4012 4013 RetVal visitUnknown(const SCEVUnknown *Expr) { return Expr; } 4014 4015 RetVal visitCouldNotCompute(const SCEVCouldNotCompute *Expr) { return Expr; } 4016 }; 4017 4018 } // namespace 4019 4020 /// Return true if V is poison given that AssumedPoison is already poison. 4021 static bool impliesPoison(const SCEV *AssumedPoison, const SCEV *S) { 4022 // The only way poison may be introduced in a SCEV expression is from a 4023 // poison SCEVUnknown (ConstantExprs are also represented as SCEVUnknown, 4024 // not SCEVConstant). Notably, nowrap flags in SCEV nodes can *not* 4025 // introduce poison -- they encode guaranteed, non-speculated knowledge. 4026 // 4027 // Additionally, all SCEV nodes propagate poison from inputs to outputs, 4028 // with the notable exception of umin_seq, where only poison from the first 4029 // operand is (unconditionally) propagated. 4030 struct SCEVPoisonCollector { 4031 bool LookThroughSeq; 4032 SmallPtrSet<const SCEV *, 4> MaybePoison; 4033 SCEVPoisonCollector(bool LookThroughSeq) : LookThroughSeq(LookThroughSeq) {} 4034 4035 bool follow(const SCEV *S) { 4036 // TODO: We can always follow the first operand, but the SCEVTraversal 4037 // API doesn't support this. 
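      // For example (illustrative): in %a umin_seq %b, poison in %a always
      // propagates to the result, but poison in %b may be masked when %a
      // saturates (e.g. %a == 0), so we must not look through umin_seq when
      // collecting SCEVs that are *guaranteed* to propagate poison.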
4038 if (!LookThroughSeq && isa<SCEVSequentialMinMaxExpr>(S)) 4039 return false; 4040 4041 if (auto *SU = dyn_cast<SCEVUnknown>(S)) { 4042 if (!isGuaranteedNotToBePoison(SU->getValue())) 4043 MaybePoison.insert(S); 4044 } 4045 return true; 4046 } 4047 bool isDone() const { return false; } 4048 }; 4049 4050 // First collect all SCEVs that might result in AssumedPoison to be poison. 4051 // We need to look through umin_seq here, because we want to find all SCEVs 4052 // that *might* result in poison, not only those that are *required* to. 4053 SCEVPoisonCollector PC1(/* LookThroughSeq */ true); 4054 visitAll(AssumedPoison, PC1); 4055 4056 // AssumedPoison is never poison. As the assumption is false, the implication 4057 // is true. Don't bother walking the other SCEV in this case. 4058 if (PC1.MaybePoison.empty()) 4059 return true; 4060 4061 // Collect all SCEVs in S that, if poison, *will* result in S being poison 4062 // as well. We cannot look through umin_seq here, as its argument only *may* 4063 // make the result poison. 4064 SCEVPoisonCollector PC2(/* LookThroughSeq */ false); 4065 visitAll(S, PC2); 4066 4067 // Make sure that no matter which SCEV in PC1.MaybePoison is actually poison, 4068 // it will also make S poison by being part of PC2.MaybePoison. 4069 return all_of(PC1.MaybePoison, 4070 [&](const SCEV *S) { return PC2.MaybePoison.contains(S); }); 4071 } 4072 4073 const SCEV * 4074 ScalarEvolution::getSequentialMinMaxExpr(SCEVTypes Kind, 4075 SmallVectorImpl<const SCEV *> &Ops) { 4076 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) && 4077 "Not a SCEVSequentialMinMaxExpr!"); 4078 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!"); 4079 if (Ops.size() == 1) 4080 return Ops[0]; 4081 #ifndef NDEBUG 4082 Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 4083 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4084 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 4085 "Operand types don't match!"); 4086 assert(Ops[0]->getType()->isPointerTy() == 4087 Ops[i]->getType()->isPointerTy() && 4088 "min/max should be consistently pointerish"); 4089 } 4090 #endif 4091 4092 // Note that SCEVSequentialMinMaxExpr is *NOT* commutative, 4093 // so we can *NOT* do any kind of sorting of the expressions! 4094 4095 // Check if we have created the same expression before. 4096 if (const SCEV *S = findExistingSCEVInCache(Kind, Ops)) 4097 return S; 4098 4099 // FIXME: there are *some* simplifications that we can do here. 4100 4101 // Keep only the first instance of an operand. 4102 { 4103 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*this, Kind); 4104 bool Changed = Deduplicator.visit(Kind, Ops, Ops); 4105 if (Changed) 4106 return getSequentialMinMaxExpr(Kind, Ops); 4107 } 4108 4109 // Check to see if one of the operands is of the same kind. If so, expand its 4110 // operands onto our operand list, and recurse to simplify. 
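  // For instance (illustrative): umin_seq(%a, umin_seq(%b, %c)) flattens to
  // umin_seq(%a, %b, %c); operand order is preserved because the sequential
  // form is not commutative.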
4111 { 4112 unsigned Idx = 0; 4113 bool DeletedAny = false; 4114 while (Idx < Ops.size()) { 4115 if (Ops[Idx]->getSCEVType() != Kind) { 4116 ++Idx; 4117 continue; 4118 } 4119 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[Idx]); 4120 Ops.erase(Ops.begin() + Idx); 4121 Ops.insert(Ops.begin() + Idx, SMME->op_begin(), SMME->op_end()); 4122 DeletedAny = true; 4123 } 4124 4125 if (DeletedAny) 4126 return getSequentialMinMaxExpr(Kind, Ops); 4127 } 4128 4129 const SCEV *SaturationPoint; 4130 ICmpInst::Predicate Pred; 4131 switch (Kind) { 4132 case scSequentialUMinExpr: 4133 SaturationPoint = getZero(Ops[0]->getType()); 4134 Pred = ICmpInst::ICMP_ULE; 4135 break; 4136 default: 4137 llvm_unreachable("Not a sequential min/max type."); 4138 } 4139 4140 for (unsigned i = 1, e = Ops.size(); i != e; ++i) { 4141 // We can replace %x umin_seq %y with %x umin %y if either: 4142 // * %y being poison implies %x is also poison. 4143 // * %x cannot be the saturating value (e.g. zero for umin). 4144 if (::impliesPoison(Ops[i], Ops[i - 1]) || 4145 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, Ops[i - 1], 4146 SaturationPoint)) { 4147 SmallVector<const SCEV *> SeqOps = {Ops[i - 1], Ops[i]}; 4148 Ops[i - 1] = getMinMaxExpr( 4149 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(Kind), 4150 SeqOps); 4151 Ops.erase(Ops.begin() + i); 4152 return getSequentialMinMaxExpr(Kind, Ops); 4153 } 4154 // Fold %x umin_seq %y to %x if %x ule %y. 4155 // TODO: We might be able to prove the predicate for a later operand. 4156 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) { 4157 Ops.erase(Ops.begin() + i); 4158 return getSequentialMinMaxExpr(Kind, Ops); 4159 } 4160 } 4161 4162 // Okay, it looks like we really DO need an expr. Check to see if we 4163 // already have one, otherwise create a new one. 
4164   FoldingSetNodeID ID;
4165   ID.AddInteger(Kind);
4166   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
4167     ID.AddPointer(Ops[i]);
4168   void *IP = nullptr;
4169   const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(ID, IP);
4170   if (ExistingSCEV)
4171     return ExistingSCEV;
4172
4173   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
4174   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
4175   SCEV *S = new (SCEVAllocator)
4176       SCEVSequentialMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
4177
4178   UniqueSCEVs.InsertNode(S, IP);
4179   registerUser(S, Ops);
4180   return S;
4181 }
4182
4183 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4184   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4185   return getSMaxExpr(Ops);
4186 }
4187
4188 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4189   return getMinMaxExpr(scSMaxExpr, Ops);
4190 }
4191
4192 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
4193   SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
4194   return getUMaxExpr(Ops);
4195 }
4196
4197 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
4198   return getMinMaxExpr(scUMaxExpr, Ops);
4199 }
4200
4201 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
4202                                          const SCEV *RHS) {
4203   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4204   return getSMinExpr(Ops);
4205 }
4206
4207 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
4208   return getMinMaxExpr(scSMinExpr, Ops);
4209 }
4210
4211 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, const SCEV *RHS,
4212                                          bool Sequential) {
4213   SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4214   return getUMinExpr(Ops, Sequential);
4215 }
4216
4217 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops,
4218                                          bool Sequential) {
4219   return Sequential ? getSequentialMinMaxExpr(scSequentialUMinExpr, Ops)
4220                     : getMinMaxExpr(scUMinExpr, Ops);
4221 }
4222
4223 const SCEV *
4224 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
4225                                              ScalableVectorType *ScalableTy) {
4226   Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
4227   Constant *One = ConstantInt::get(IntTy, 1);
4228   Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
4229   // Note that the expression we created is the final expression; we don't
4230   // want to simplify it any further. Also, if we call a normal getSCEV(),
4231   // we'll end up in an endless recursion. So just create an SCEVUnknown.
4232   return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
4233 }
4234
4235 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
4236   if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
4237     return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
4238   // We can bypass creating a target-independent constant expression and then
4239   // folding it back into a ConstantInt. This is just a compile-time
4240   // optimization.
4241   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
4242 }
4243
4244 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
4245   if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
4246     return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
4247   // We can bypass creating a target-independent constant expression and then
4248   // folding it back into a ConstantInt. This is just a compile-time
4249   // optimization.
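  // (Illustrative: under a typical 64-bit DataLayout, an i36 has a store size
  // of 5 bytes but an alloc size of 8 bytes; this helper returns the former.)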
4250   return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
4251 }
4252
4253 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
4254                                              StructType *STy,
4255                                              unsigned FieldNo) {
4256   // We can bypass creating a target-independent constant expression and then
4257   // folding it back into a ConstantInt. This is just a compile-time
4258   // optimization.
4259   return getConstant(
4260       IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
4261 }
4262
4263 const SCEV *ScalarEvolution::getUnknown(Value *V) {
4264   // Don't attempt to do anything other than create a SCEVUnknown object
4265   // here. createSCEV only calls getUnknown after checking for all other
4266   // interesting possibilities, and any other code that calls getUnknown
4267   // is doing so in order to hide a value from SCEV canonicalization.
4268
4269   FoldingSetNodeID ID;
4270   ID.AddInteger(scUnknown);
4271   ID.AddPointer(V);
4272   void *IP = nullptr;
4273   if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
4274     assert(cast<SCEVUnknown>(S)->getValue() == V &&
4275            "Stale SCEVUnknown in uniquing map!");
4276     return S;
4277   }
4278   SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
4279                                             FirstUnknown);
4280   FirstUnknown = cast<SCEVUnknown>(S);
4281   UniqueSCEVs.InsertNode(S, IP);
4282   return S;
4283 }
4284
4285 //===----------------------------------------------------------------------===//
4286 //            Basic SCEV Analysis and PHI Idiom Recognition Code
4287 //
4288
4289 /// Test if values of the given type are analyzable within the SCEV
4290 /// framework. This primarily includes integer types, and it can optionally
4291 /// include pointer types if the ScalarEvolution class has access to
4292 /// target-specific information.
4293 bool ScalarEvolution::isSCEVable(Type *Ty) const {
4294   // Integers and pointers are always SCEVable.
4295   return Ty->isIntOrPtrTy();
4296 }
4297
4298 /// Return the size in bits of the specified type, for which isSCEVable must
4299 /// return true.
4300 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
4301   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4302   if (Ty->isPointerTy())
4303     return getDataLayout().getIndexTypeSizeInBits(Ty);
4304   return getDataLayout().getTypeSizeInBits(Ty);
4305 }
4306
4307 /// Return a type with the same bitwidth as the given type and which represents
4308 /// how SCEV will treat the given type, for which isSCEVable must return
4309 /// true. For pointer types, this is the pointer index sized integer type.
4310 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
4311   assert(isSCEVable(Ty) && "Type is not SCEVable!");
4312
4313   if (Ty->isIntegerTy())
4314     return Ty;
4315
4316   // The only other supported type is pointer.
4317   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
4318   return getDataLayout().getIndexType(Ty);
4319 }
4320
4321 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
4322   return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
4323 }
4324
4325 bool ScalarEvolution::instructionCouldExistWithOperands(const SCEV *A,
4326                                                         const SCEV *B) {
4327   // For a valid use point to exist, the defining scope of one operand
4328   // must dominate the other.
4329   bool PreciseA, PreciseB;
4330   auto *ScopeA = getDefiningScopeBound({A}, PreciseA);
4331   auto *ScopeB = getDefiningScopeBound({B}, PreciseB);
4332   if (!PreciseA || !PreciseB)
4333     // Can't tell.
4334     return false;
4335   return (ScopeA == ScopeB) || DT.dominates(ScopeA, ScopeB) ||
4336          DT.dominates(ScopeB, ScopeA);
4337 }
4338
4339
4340 const SCEV *ScalarEvolution::getCouldNotCompute() {
4341   return CouldNotCompute.get();
4342 }
4343
4344 bool ScalarEvolution::checkValidity(const SCEV *S) const {
4345   bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
4346     auto *SU = dyn_cast<SCEVUnknown>(S);
4347     return SU && SU->getValue() == nullptr;
4348   });
4349
4350   return !ContainsNulls;
4351 }
4352
4353 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
4354   HasRecMapType::iterator I = HasRecMap.find(S);
4355   if (I != HasRecMap.end())
4356     return I->second;
4357
4358   bool FoundAddRec =
4359       SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
4360   HasRecMap.insert({S, FoundAddRec});
4361   return FoundAddRec;
4362 }
4363
4364 /// Return the Value set for \p S. \p S can be represented by any value
4365 /// in the set.
4366 ArrayRef<Value *> ScalarEvolution::getSCEVValues(const SCEV *S) {
4367   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
4368   if (SI == ExprValueMap.end())
4369     return None;
4370 #ifndef NDEBUG
4371   if (VerifySCEVMap) {
4372     // Check there is no dangling Value in the set returned.
4373     for (Value *V : SI->second)
4374       assert(ValueExprMap.count(V));
4375   }
4376 #endif
4377   return SI->second.getArrayRef();
4378 }
4379
4380 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4381 /// cannot be used separately. eraseValueFromMap should be used to remove
4382 /// V from ValueExprMap and ExprValueMap at the same time.
4383 void ScalarEvolution::eraseValueFromMap(Value *V) {
4384   ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4385   if (I != ValueExprMap.end()) {
4386     auto EVIt = ExprValueMap.find(I->second);
4387     bool Removed = EVIt->second.remove(V);
4388     (void) Removed;
4389     assert(Removed && "Value not in ExprValueMap?");
4390     ValueExprMap.erase(I);
4391   }
4392 }
4393
4394 void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) {
4395   // A recursive query may have already computed the SCEV. It should be
4396   // equivalent, but may not necessarily be exactly the same, e.g. due to lazily
4397   // inferred nowrap flags.
4398   auto It = ValueExprMap.find_as(V);
4399   if (It == ValueExprMap.end()) {
4400     ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4401     ExprValueMap[S].insert(V);
4402   }
4403 }
4404
4405 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4406 /// create a new one.
4407 const SCEV *ScalarEvolution::getSCEV(Value *V) {
4408   assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4409
4410   const SCEV *S = getExistingSCEV(V);
4411   if (S == nullptr) {
4412     S = createSCEV(V);
4413     // During PHI resolution, it is possible to create two SCEVs for the same
4414     // V, so we need to double-check whether V->S is already in ValueExprMap
4415     // before inserting S->V into ExprValueMap.
4416 std::pair<ValueExprMapType::iterator, bool> Pair = 4417 ValueExprMap.insert({SCEVCallbackVH(V, this), S}); 4418 if (Pair.second) 4419 ExprValueMap[S].insert(V); 4420 } 4421 return S; 4422 } 4423 4424 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) { 4425 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 4426 4427 ValueExprMapType::iterator I = ValueExprMap.find_as(V); 4428 if (I != ValueExprMap.end()) { 4429 const SCEV *S = I->second; 4430 assert(checkValidity(S) && 4431 "existing SCEV has not been properly invalidated"); 4432 return S; 4433 } 4434 return nullptr; 4435 } 4436 4437 /// Return a SCEV corresponding to -V = -1*V 4438 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, 4439 SCEV::NoWrapFlags Flags) { 4440 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4441 return getConstant( 4442 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 4443 4444 Type *Ty = V->getType(); 4445 Ty = getEffectiveSCEVType(Ty); 4446 return getMulExpr(V, getMinusOne(Ty), Flags); 4447 } 4448 4449 /// If Expr computes ~A, return A else return nullptr 4450 static const SCEV *MatchNotExpr(const SCEV *Expr) { 4451 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); 4452 if (!Add || Add->getNumOperands() != 2 || 4453 !Add->getOperand(0)->isAllOnesValue()) 4454 return nullptr; 4455 4456 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); 4457 if (!AddRHS || AddRHS->getNumOperands() != 2 || 4458 !AddRHS->getOperand(0)->isAllOnesValue()) 4459 return nullptr; 4460 4461 return AddRHS->getOperand(1); 4462 } 4463 4464 /// Return a SCEV corresponding to ~V = -1-V 4465 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 4466 assert(!V->getType()->isPointerTy() && "Can't negate pointer"); 4467 4468 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 4469 return getConstant( 4470 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 4471 4472 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y) 4473 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) { 4474 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) { 4475 SmallVector<const SCEV *, 2> MatchedOperands; 4476 for (const SCEV *Operand : MME->operands()) { 4477 const SCEV *Matched = MatchNotExpr(Operand); 4478 if (!Matched) 4479 return (const SCEV *)nullptr; 4480 MatchedOperands.push_back(Matched); 4481 } 4482 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()), 4483 MatchedOperands); 4484 }; 4485 if (const SCEV *Replaced = MatchMinMaxNegation(MME)) 4486 return Replaced; 4487 } 4488 4489 Type *Ty = V->getType(); 4490 Ty = getEffectiveSCEVType(Ty); 4491 return getMinusSCEV(getMinusOne(Ty), V); 4492 } 4493 4494 const SCEV *ScalarEvolution::removePointerBase(const SCEV *P) { 4495 assert(P->getType()->isPointerTy()); 4496 4497 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) { 4498 // The base of an AddRec is the first operand. 4499 SmallVector<const SCEV *> Ops{AddRec->operands()}; 4500 Ops[0] = removePointerBase(Ops[0]); 4501 // Don't try to transfer nowrap flags for now. We could in some cases 4502 // (for example, if pointer operand of the AddRec is a SCEVUnknown). 4503 return getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap); 4504 } 4505 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) { 4506 // The base of an Add is the pointer operand. 
    SmallVector<const SCEV *> Ops{Add->operands()};
    const SCEV **PtrOp = nullptr;
    for (const SCEV *&AddOp : Ops) {
      if (AddOp->getType()->isPointerTy()) {
        assert(!PtrOp && "Cannot have multiple pointer ops");
        PtrOp = &AddOp;
      }
    }
    *PtrOp = removePointerBase(*PtrOp);
    // Don't try to transfer nowrap flags for now. We could in some cases
    // (for example, if the pointer operand of the Add is a SCEVUnknown).
    return getAddExpr(Ops);
  }
  // Any other expression must be a pointer base.
  return getZero(P->getType());
}

const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // If we subtract two pointers with different pointer bases, bail.
  // Eventually, we're going to add an assertion to getMulExpr that we
  // can't multiply by a pointer.
  if (RHS->getType()->isPointerTy()) {
    if (!LHS->getType()->isPointerTy() ||
        getPointerBase(LHS) != getPointerBase(RHS))
      return getCouldNotCompute();
    LHS = removePointerBase(LHS);
    RHS = removePointerBase(RHS);
  }

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (hasFlags(Flags, SCEV::FlagNSW)) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not (in i8, M = -128: (-1)*(-128) wraps back to -128,
    // while -1 - (-128) = 127 is fine). So to transfer NSW from LHS - RHS
    // to LHS + (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}

const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getTruncateExpr(V, Ty);
}

const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS,
                                                        bool Sequential) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops, Sequential);
}

const SCEV *
ScalarEvolution::getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
                                            bool Sequential) {
  assert(!Ops.empty() && "At least one operand must be present!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();
  assert(MaxType && "Failed to find maximum type!");

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps, Sequential);
}

const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  while (true) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
      V = AddRec->getStart();
    } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
      const SCEV *PtrOp = nullptr;
      for (const SCEV *AddOp : Add->operands()) {
        if (AddOp->getType()->isPointerTy()) {
          assert(!PtrOp && "Cannot have multiple pointer ops");
          PtrOp = AddOp;
        }
      }
      assert(PtrOp && "Must have pointer op");
      V = PtrOp;
    } else // Not something we can look further into.
      return V;
  }
}

/// Push users of the given Instruction onto the given Worklist.
static void PushDefUseChildren(Instruction *I,
                               SmallVectorImpl<Instruction *> &Worklist,
                               SmallPtrSetImpl<Instruction *> &Visited) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users()) {
    auto *UserInsn = cast<Instruction>(U);
    if (Visited.insert(UserInsn).second)
      Worklist.push_back(UserInsn);
  }
}

namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its start expression. For AddRecs on other loops, use the AddRec
/// itself if IgnoreOtherLoops is true; otherwise the rewrite cannot be done.
/// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
/// done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
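    // For example (a sketch), rewriting (%a + {%b,+,1}<%L>) with respect to
    // %L yields (%a + %b): the AddRec on %L is replaced by its start %b,
    // while an AddRec on any other loop is kept and merely recorded in
    // SeenOtherLoops.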
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
/// use its post-increment expression; for AddRecs on other loops, use the
/// AddRec itself. If the SCEV contains a loop-variant SCEVUnknown, the
/// rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};

/// This class evaluates the compare condition by matching it against the
/// condition of the loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target the same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop backedge condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If the value matches the backedge condition for the loop latch,
  // then return a constant evolution node based on whether the backedge
  // is taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}

class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only loop-invariant SCEVUnknowns are allowed.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace

SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoSignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      getSignedOverflowLimitForStep(Step, &Pred, this);
  if (OverflowLimit &&
      (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
       isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
    Result = setFlags(Result, SCEV::FlagNSW);
  }
  return Result;
}

SCEV::NoWrapFlags
ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
  SCEV::NoWrapFlags Result = AR->getNoWrapFlags();

  if (AR->hasNoUnsignedWrap())
    return Result;

  if (!AR->isAffine())
    return Result;

  const SCEV *Step = AR->getStepRecurrence(*this);
  unsigned BitWidth = getTypeSizeInBits(AR->getType());
  const Loop *L = AR->getLoop();

  // Check whether the backedge-taken count is SCEVCouldNotCompute.
  // Note that this serves two purposes: It filters out loops that are
  // simply not analyzable, and it covers the case where this code is
  // being called from within backedge-taken count analysis, such that
  // attempting to ask for the backedge-taken count would likely result
  // in infinite recursion. In the latter case, the analysis code will
  // cope with a conservative value, and it will take care to purge
  // that value once it has finished.
  const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);

  // Normally, in the cases we can prove no-overflow via a
  // backedge guarding condition, we can also compute a backedge
  // taken count for the loop. The exceptions are assumptions and
  // guards present in the loop -- SCEV is not great at exploiting
  // these to compute max backedge taken counts, but can still use
  // these to prove lack of overflow. Use this fact to avoid
  // doing extra work that may not pay off.
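  // A sketch of the exception (hypothetical IR): if a loop's bound is only
  // known through
  //   call void @llvm.assume(i1 %n.bounded)
  // SCEV may have no computable constant max backedge-taken count, yet the
  // assumption can still rule out wrapping of the induction variable, so we
  // keep analyzing when assumptions or guards are present.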

  if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
      AC.assumptions().empty())
    return Result;

  // If the backedge is guarded by a comparison with the pre-inc value the
  // addrec is safe. Also, if the entry is guarded by a comparison with the
  // start value and the backedge is guarded by a comparison with the post-inc
  // value, the addrec is safe.
  if (isKnownPositive(Step)) {
    const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                getUnsignedRangeMax(Step));
    if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
        isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
      Result = setFlags(Result, SCEV::FlagNUW);
    }
  }

  return Result;
}

namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace

/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    // An i1 `xor` is equivalent to an i1 `add` (addition modulo 2).
    if (V->getType()->isIntegerTy(1))
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right by a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  // Recognise the intrinsic loop.decrement.reg; as it has exactly the same
  // semantics as a Sub, return a binary sub expression.
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
      return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));

  return None;
}

/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
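  // For example (hypothetical widths): with %X an i64 phi and
  //   Op = (sext i32 (trunc i64 %X to i32) to i64)
  // we return i32 and set Signed to true; the caller can then guard the
  // rewrite with the predicate
  //   %X == (sext i32 (trunc i64 %X to i32) to i64).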
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}

static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}

// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which corresponds to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
//
// TODOs:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which corresponds to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which corresponds to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //  Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                             :: from step i
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //     + Accum                                                 :: from P3
  //
  //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //     + Accum                            :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n.

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // e.g., if the truncated Accum is 0 and StartVal is a constant, then
  // PHISCEV will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.
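  // A hypothetical constant case, assuming a sign-extending pattern with
  // TruncTy = i8: if StartVal were the constant 300, the extended start
  // would be sext(trunc(300 to i8)) = 44, so P2 (300 == 44) is known false
  // at compile time and the rewrite is abandoned below.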

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW)
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded in creating an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds->implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds->implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
  insertValueToMap(PN, PHISCEV);

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
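  // For example (a sketch): for the backedge value
  //   %iv.next = add nsw i32 %iv, 1
  // signed overflow produces poison, and if isAddRecNeverPoison can show
  // that such poison would inevitably trigger undefined behavior, the
  // post-inc expression {Start+1,+,1} may be pre-built with the same flags.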
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
    assert(isLoopInvariant(Accum, L) &&
           "Accum is defined outside L, but is not invariant?");
    if (isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
  }

  return PHISCEV;
}

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  insertValueToMap(PN, SymbolicName);

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
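      // For example (a sketch): with i += j inside loop %L where j is itself
      // {1,+,1}<%L>, Accum is {1,+,1}<%L> and the phi becomes the quadratic
      // recurrence {Start,+,1,+,1}<%L>, which is still representable; an
      // Accum varying with some unrelated loop is rejected.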
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, PHISCEV);

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this by saying that i is the shifted value of
    // BEValue by one iteration:
    //     PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetMemoizedResults(SymbolicName);
        insertValueToMap(PN, Shifted);
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}

// Checks if the SCEV S is available at BB. S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr; // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant:
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUMaxExpr:
      case scSMaxExpr:
      case scUMinExpr:
      case scSMinExpr:
      case scSequentialUMinExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are in the loop BB is in, or in some
        // outer loop. This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable. We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("Unknown SCEV kind!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
// match.
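// For example, if Merge lists its incoming values as [ %y, %right ],
// [ %x, %left ] (PHI operand order need not match successor order), the
// second dominance check below detects the swap and still returns LHS and
// RHS relative to the branch condition.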
5855 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, 5856 Value *&C, Value *&LHS, Value *&RHS) { 5857 C = BI->getCondition(); 5858 5859 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0)); 5860 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1)); 5861 5862 if (!LeftEdge.isSingleEdge()) 5863 return false; 5864 5865 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()"); 5866 5867 Use &LeftUse = Merge->getOperandUse(0); 5868 Use &RightUse = Merge->getOperandUse(1); 5869 5870 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) { 5871 LHS = LeftUse; 5872 RHS = RightUse; 5873 return true; 5874 } 5875 5876 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) { 5877 LHS = RightUse; 5878 RHS = LeftUse; 5879 return true; 5880 } 5881 5882 return false; 5883 } 5884 5885 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) { 5886 auto IsReachable = 5887 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); }; 5888 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) { 5889 const Loop *L = LI.getLoopFor(PN->getParent()); 5890 5891 // We don't want to break LCSSA, even in a SCEV expression tree. 5892 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 5893 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L) 5894 return nullptr; 5895 5896 // Try to match 5897 // 5898 // br %cond, label %left, label %right 5899 // left: 5900 // br label %merge 5901 // right: 5902 // br label %merge 5903 // merge: 5904 // V = phi [ %x, %left ], [ %y, %right ] 5905 // 5906 // as "select %cond, %x, %y" 5907 5908 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock(); 5909 assert(IDom && "At least the entry block should dominate PN"); 5910 5911 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator()); 5912 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr; 5913 5914 if (BI && BI->isConditional() && 5915 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) && 5916 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) && 5917 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent())) 5918 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS); 5919 } 5920 5921 return nullptr; 5922 } 5923 5924 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 5925 if (const SCEV *S = createAddRecFromPHI(PN)) 5926 return S; 5927 5928 if (const SCEV *S = createNodeFromSelectLikePHI(PN)) 5929 return S; 5930 5931 // If the PHI has a single incoming value, follow that value, unless the 5932 // PHI's incoming blocks are in a different loop, in which case doing so 5933 // risks breaking LCSSA form. Instcombine would normally zap these, but 5934 // it doesn't have DominatorTree information, so it may miss cases. 5935 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC})) 5936 if (LI.replacementPreservesLCSSAForm(PN, V)) 5937 return getSCEV(V); 5938 5939 // If it's not a loop phi, we can't handle it yet. 5940 return getUnknown(PN); 5941 } 5942 5943 bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, 5944 SCEVTypes RootKind) { 5945 struct FindClosure { 5946 const SCEV *OperandToFind; 5947 const SCEVTypes RootKind; // Must be a sequential min/max expression. 5948 const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind. 
5949 5950 bool Found = false; 5951 5952 bool canRecurseInto(SCEVTypes Kind) const { 5953 // We can only recurse into the SCEV expression of the same effective type 5954 // as the type of our root SCEV expression, and into zero-extensions. 5955 return RootKind == Kind || NonSequentialRootKind == Kind || 5956 scZeroExtend == Kind; 5957 }; 5958 5959 FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind) 5960 : OperandToFind(OperandToFind), RootKind(RootKind), 5961 NonSequentialRootKind( 5962 SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType( 5963 RootKind)) {} 5964 5965 bool follow(const SCEV *S) { 5966 Found = S == OperandToFind; 5967 5968 return !isDone() && canRecurseInto(S->getSCEVType()); 5969 } 5970 5971 bool isDone() const { return Found; } 5972 }; 5973 5974 FindClosure FC(OperandToFind, RootKind); 5975 visitAll(Root, FC); 5976 return FC.Found; 5977 } 5978 5979 const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond( 5980 Instruction *I, ICmpInst *Cond, Value *TrueVal, Value *FalseVal) { 5981 // Try to match some simple smax or umax patterns. 5982 auto *ICI = Cond; 5983 5984 Value *LHS = ICI->getOperand(0); 5985 Value *RHS = ICI->getOperand(1); 5986 5987 switch (ICI->getPredicate()) { 5988 case ICmpInst::ICMP_SLT: 5989 case ICmpInst::ICMP_SLE: 5990 case ICmpInst::ICMP_ULT: 5991 case ICmpInst::ICMP_ULE: 5992 std::swap(LHS, RHS); 5993 LLVM_FALLTHROUGH; 5994 case ICmpInst::ICMP_SGT: 5995 case ICmpInst::ICMP_SGE: 5996 case ICmpInst::ICMP_UGT: 5997 case ICmpInst::ICMP_UGE: 5998 // a > b ? a+x : b+x -> max(a, b)+x 5999 // a > b ? b+x : a+x -> min(a, b)+x 6000 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) { 6001 bool Signed = ICI->isSigned(); 6002 const SCEV *LA = getSCEV(TrueVal); 6003 const SCEV *RA = getSCEV(FalseVal); 6004 const SCEV *LS = getSCEV(LHS); 6005 const SCEV *RS = getSCEV(RHS); 6006 if (LA->getType()->isPointerTy()) { 6007 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA. 6008 // Need to make sure we can't produce weird expressions involving 6009 // negated pointers. 6010 if (LA == LS && RA == RS) 6011 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS); 6012 if (LA == RS && RA == LS) 6013 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS); 6014 } 6015 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * { 6016 if (Op->getType()->isPointerTy()) { 6017 Op = getLosslessPtrToIntExpr(Op); 6018 if (isa<SCEVCouldNotCompute>(Op)) 6019 return Op; 6020 } 6021 if (Signed) 6022 Op = getNoopOrSignExtend(Op, I->getType()); 6023 else 6024 Op = getNoopOrZeroExtend(Op, I->getType()); 6025 return Op; 6026 }; 6027 LS = CoerceOperand(LS); 6028 RS = CoerceOperand(RS); 6029 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS)) 6030 break; 6031 const SCEV *LDiff = getMinusSCEV(LA, LS); 6032 const SCEV *RDiff = getMinusSCEV(RA, RS); 6033 if (LDiff == RDiff) 6034 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS), 6035 LDiff); 6036 LDiff = getMinusSCEV(LA, RS); 6037 RDiff = getMinusSCEV(RA, LS); 6038 if (LDiff == RDiff) 6039 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS), 6040 LDiff); 6041 } 6042 break; 6043 case ICmpInst::ICMP_NE: 6044 // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y 6045 std::swap(TrueVal, FalseVal); 6046 LLVM_FALLTHROUGH; 6047 case ICmpInst::ICMP_EQ: 6048 // x == 0 ? 
C+y : x+y -> umax(x, C)+y iff C u<= 1 6049 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) && 6050 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) { 6051 const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), I->getType()); 6052 const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y 6053 const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y 6054 const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x 6055 const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y 6056 if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1)) 6057 return getAddExpr(getUMaxExpr(X, C), Y); 6058 } 6059 // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...)) 6060 // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...)) 6061 // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...) 6062 // -> umin_seq(x, umin (..., umin_seq(...), ...)) 6063 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() && 6064 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) { 6065 const SCEV *X = getSCEV(LHS); 6066 while (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(X)) 6067 X = ZExt->getOperand(); 6068 if (getTypeSizeInBits(X->getType()) <= getTypeSizeInBits(I->getType())) { 6069 const SCEV *FalseValExpr = getSCEV(FalseVal); 6070 if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr)) 6071 return getUMinExpr(getNoopOrZeroExtend(X, I->getType()), FalseValExpr, 6072 /*Sequential=*/true); 6073 } 6074 } 6075 break; 6076 default: 6077 break; 6078 } 6079 6080 return getUnknown(I); 6081 } 6082 6083 static Optional<const SCEV *> 6084 createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, 6085 const SCEV *TrueExpr, const SCEV *FalseExpr) { 6086 assert(CondExpr->getType()->isIntegerTy(1) && 6087 TrueExpr->getType() == FalseExpr->getType() && 6088 TrueExpr->getType()->isIntegerTy(1) && 6089 "Unexpected operands of a select."); 6090 6091 // i1 cond ? i1 x : i1 C --> C + (i1 cond ? (i1 x - i1 C) : i1 0) 6092 // --> C + (umin_seq cond, x - C) 6093 // 6094 // i1 cond ? i1 C : i1 x --> C + (i1 cond ? i1 0 : (i1 x - i1 C)) 6095 // --> C + (i1 ~cond ? (i1 x - i1 C) : i1 0) 6096 // --> C + (umin_seq ~cond, x - C) 6097 6098 // FIXME: while we can't legally model the case where both of the hands 6099 // are fully variable, we only require that the *difference* is constant. 
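  //
  // As an illustration of the rewrite above, take a zero false hand:
  //   i1 %cond ? i1 %x : i1 false  -->  false + (umin_seq %cond, %x - false)
  //                                -->  (umin_seq %cond, %x)
  // which evaluates to %x when %cond is true and to 0 otherwise.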
6100 if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr)) 6101 return None; 6102 6103 const SCEV *X, *C; 6104 if (isa<SCEVConstant>(TrueExpr)) { 6105 CondExpr = SE->getNotSCEV(CondExpr); 6106 X = FalseExpr; 6107 C = TrueExpr; 6108 } else { 6109 X = TrueExpr; 6110 C = FalseExpr; 6111 } 6112 return SE->getAddExpr(C, SE->getUMinExpr(CondExpr, SE->getMinusSCEV(X, C), 6113 /*Sequential=*/true)); 6114 } 6115 6116 static Optional<const SCEV *> createNodeForSelectViaUMinSeq(ScalarEvolution *SE, 6117 Value *Cond, 6118 Value *TrueVal, 6119 Value *FalseVal) { 6120 if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal)) 6121 return None; 6122 6123 const auto *SECond = SE->getSCEV(Cond); 6124 const auto *SETrue = SE->getSCEV(TrueVal); 6125 const auto *SEFalse = SE->getSCEV(FalseVal); 6126 return createNodeForSelectViaUMinSeq(SE, SECond, SETrue, SEFalse); 6127 } 6128 6129 const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq( 6130 Value *V, Value *Cond, Value *TrueVal, Value *FalseVal) { 6131 assert(Cond->getType()->isIntegerTy(1) && "Select condition is not an i1?"); 6132 assert(TrueVal->getType() == FalseVal->getType() && 6133 V->getType() == TrueVal->getType() && 6134 "Types of select hands and of the result must match."); 6135 6136 // For now, only deal with i1-typed `select`s. 6137 if (!V->getType()->isIntegerTy(1)) 6138 return getUnknown(V); 6139 6140 if (Optional<const SCEV *> S = 6141 createNodeForSelectViaUMinSeq(this, Cond, TrueVal, FalseVal)) 6142 return *S; 6143 6144 return getUnknown(V); 6145 } 6146 6147 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Value *V, Value *Cond, 6148 Value *TrueVal, 6149 Value *FalseVal) { 6150 // Handle "constant" branch or select. This can occur for instance when a 6151 // loop pass transforms an inner loop and moves on to process the outer loop. 6152 if (auto *CI = dyn_cast<ConstantInt>(Cond)) 6153 return getSCEV(CI->isOne() ? TrueVal : FalseVal); 6154 6155 if (auto *I = dyn_cast<Instruction>(V)) { 6156 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) { 6157 const SCEV *S = createNodeForSelectOrPHIInstWithICmpInstCond( 6158 I, ICI, TrueVal, FalseVal); 6159 if (!isa<SCEVUnknown>(S)) 6160 return S; 6161 } 6162 } 6163 6164 return createNodeForSelectOrPHIViaUMinSeq(V, Cond, TrueVal, FalseVal); 6165 } 6166 6167 /// Expand GEP instructions into add and multiply operations. This allows them 6168 /// to be analyzed by regular SCEV code. 6169 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 6170 // Don't attempt to analyze GEPs over unsized objects. 6171 if (!GEP->getSourceElementType()->isSized()) 6172 return getUnknown(GEP); 6173 6174 SmallVector<const SCEV *, 4> IndexExprs; 6175 for (Value *Index : GEP->indices()) 6176 IndexExprs.push_back(getSCEV(Index)); 6177 return getGEPExpr(GEP, IndexExprs); 6178 } 6179 6180 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) { 6181 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 6182 return C->getAPInt().countTrailingZeros(); 6183 6184 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S)) 6185 return GetMinTrailingZeros(I->getOperand()); 6186 6187 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 6188 return std::min(GetMinTrailingZeros(T->getOperand()), 6189 (uint32_t)getTypeSizeInBits(T->getType())); 6190 6191 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 6192 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 6193 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 6194 ? 
getTypeSizeInBits(E->getType()) 6195 : OpRes; 6196 } 6197 6198 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 6199 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 6200 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) 6201 ? getTypeSizeInBits(E->getType()) 6202 : OpRes; 6203 } 6204 6205 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 6206 // The result is the min of all operands results. 6207 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 6208 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 6209 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 6210 return MinOpRes; 6211 } 6212 6213 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 6214 // The result is the sum of all operands results. 6215 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 6216 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 6217 for (unsigned i = 1, e = M->getNumOperands(); 6218 SumOpRes != BitWidth && i != e; ++i) 6219 SumOpRes = 6220 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth); 6221 return SumOpRes; 6222 } 6223 6224 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 6225 // The result is the min of all operands results. 6226 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 6227 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 6228 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 6229 return MinOpRes; 6230 } 6231 6232 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 6233 // The result is the min of all operands results. 6234 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 6235 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 6236 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 6237 return MinOpRes; 6238 } 6239 6240 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 6241 // The result is the min of all operands results. 6242 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 6243 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 6244 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 6245 return MinOpRes; 6246 } 6247 6248 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 6249 // For a SCEVUnknown, ask ValueTracking. 6250 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT); 6251 return Known.countMinTrailingZeros(); 6252 } 6253 6254 // SCEVUDivExpr 6255 return 0; 6256 } 6257 6258 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 6259 auto I = MinTrailingZerosCache.find(S); 6260 if (I != MinTrailingZerosCache.end()) 6261 return I->second; 6262 6263 uint32_t Result = GetMinTrailingZerosImpl(S); 6264 auto InsertPair = MinTrailingZerosCache.insert({S, Result}); 6265 assert(InsertPair.second && "Should insert a new key"); 6266 return InsertPair.first->second; 6267 } 6268 6269 /// Helper method to assign a range to V from metadata present in the IR. 
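/// For example, !range !{i64 0, i64 10} denotes the half-open interval
/// [0, 10).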
6270 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { 6271 if (Instruction *I = dyn_cast<Instruction>(V)) 6272 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) 6273 return getConstantRangeFromMetadata(*MD); 6274 6275 return None; 6276 } 6277 6278 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec, 6279 SCEV::NoWrapFlags Flags) { 6280 if (AddRec->getNoWrapFlags(Flags) != Flags) { 6281 AddRec->setNoWrapFlags(Flags); 6282 UnsignedRanges.erase(AddRec); 6283 SignedRanges.erase(AddRec); 6284 } 6285 } 6286 6287 ConstantRange ScalarEvolution:: 6288 getRangeForUnknownRecurrence(const SCEVUnknown *U) { 6289 const DataLayout &DL = getDataLayout(); 6290 6291 unsigned BitWidth = getTypeSizeInBits(U->getType()); 6292 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true); 6293 6294 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then 6295 // use information about the trip count to improve our available range. Note 6296 // that the trip count independent cases are already handled by known bits. 6297 // WARNING: The definition of recurrence used here is subtly different than 6298 // the one used by AddRec (and thus most of this file). Step is allowed to 6299 // be arbitrarily loop varying here, where AddRec allows only loop invariant 6300 // and other addrecs in the same loop (for non-affine addrecs). The code 6301 // below intentionally handles the case where step is not loop invariant. 6302 auto *P = dyn_cast<PHINode>(U->getValue()); 6303 if (!P) 6304 return FullSet; 6305 6306 // Make sure that no Phi input comes from an unreachable block. Otherwise, 6307 // even the values that are not available in these blocks may come from them, 6308 // and this leads to false-positive recurrence test. 6309 for (auto *Pred : predecessors(P->getParent())) 6310 if (!DT.isReachableFromEntry(Pred)) 6311 return FullSet; 6312 6313 BinaryOperator *BO; 6314 Value *Start, *Step; 6315 if (!matchSimpleRecurrence(P, BO, Start, Step)) 6316 return FullSet; 6317 6318 // If we found a recurrence in reachable code, we must be in a loop. Note 6319 // that BO might be in some subloop of L, and that's completely okay. 6320 auto *L = LI.getLoopFor(P->getParent()); 6321 assert(L && L->getHeader() == P->getParent()); 6322 if (!L->contains(BO->getParent())) 6323 // NOTE: This bailout should be an assert instead. However, asserting 6324 // the condition here exposes a case where LoopFusion is querying SCEV 6325 // with malformed loop information during the midst of the transform. 6326 // There doesn't appear to be an obvious fix, so for the moment bailout 6327 // until the caller issue can be fixed. PR49566 tracks the bug. 6328 return FullSet; 6329 6330 // TODO: Extend to other opcodes such as mul, and div 6331 switch (BO->getOpcode()) { 6332 default: 6333 return FullSet; 6334 case Instruction::AShr: 6335 case Instruction::LShr: 6336 case Instruction::Shl: 6337 break; 6338 }; 6339 6340 if (BO->getOperand(0) != P) 6341 // TODO: Handle the power function forms some day. 6342 return FullSet; 6343 6344 unsigned TC = getSmallConstantMaxTripCount(L); 6345 if (!TC || TC >= BitWidth) 6346 return FullSet; 6347 6348 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT); 6349 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT); 6350 assert(KnownStart.getBitWidth() == BitWidth && 6351 KnownStep.getBitWidth() == BitWidth); 6352 6353 // Compute total shift amount, being careful of overflow and bitwidths. 
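  // E.g. if the step's known maximum value is 2 and the max trip count is 4,
  // the value is shifted by at most 2 * (4 - 1) = 6 bit positions in total.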
6354 auto MaxShiftAmt = KnownStep.getMaxValue(); 6355 APInt TCAP(BitWidth, TC-1); 6356 bool Overflow = false; 6357 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow); 6358 if (Overflow) 6359 return FullSet; 6360 6361 switch (BO->getOpcode()) { 6362 default: 6363 llvm_unreachable("filtered out above"); 6364 case Instruction::AShr: { 6365 // For each ashr, three cases: 6366 // shift = 0 => unchanged value 6367 // saturation => 0 or -1 6368 // other => a value closer to zero (of the same sign) 6369 // Thus, the end value is closer to zero than the start. 6370 auto KnownEnd = KnownBits::ashr(KnownStart, 6371 KnownBits::makeConstant(TotalShift)); 6372 if (KnownStart.isNonNegative()) 6373 // Analogous to lshr (simply not yet canonicalized) 6374 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6375 KnownStart.getMaxValue() + 1); 6376 if (KnownStart.isNegative()) 6377 // End >=u Start && End <=s Start 6378 return ConstantRange::getNonEmpty(KnownStart.getMinValue(), 6379 KnownEnd.getMaxValue() + 1); 6380 break; 6381 } 6382 case Instruction::LShr: { 6383 // For each lshr, three cases: 6384 // shift = 0 => unchanged value 6385 // saturation => 0 6386 // other => a smaller positive number 6387 // Thus, the low end of the unsigned range is the last value produced. 6388 auto KnownEnd = KnownBits::lshr(KnownStart, 6389 KnownBits::makeConstant(TotalShift)); 6390 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(), 6391 KnownStart.getMaxValue() + 1); 6392 } 6393 case Instruction::Shl: { 6394 // Iff no bits are shifted out, value increases on every shift. 6395 auto KnownEnd = KnownBits::shl(KnownStart, 6396 KnownBits::makeConstant(TotalShift)); 6397 if (TotalShift.ult(KnownStart.countMinLeadingZeros())) 6398 return ConstantRange(KnownStart.getMinValue(), 6399 KnownEnd.getMaxValue() + 1); 6400 break; 6401 } 6402 }; 6403 return FullSet; 6404 } 6405 6406 /// Determine the range for a particular SCEV. If SignHint is 6407 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges 6408 /// with a "cleaner" unsigned (resp. signed) representation. 6409 const ConstantRange & 6410 ScalarEvolution::getRangeRef(const SCEV *S, 6411 ScalarEvolution::RangeSignHint SignHint) { 6412 DenseMap<const SCEV *, ConstantRange> &Cache = 6413 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges 6414 : SignedRanges; 6415 ConstantRange::PreferredRangeType RangeType = 6416 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED 6417 ? ConstantRange::Unsigned : ConstantRange::Signed; 6418 6419 // See if we've computed this range already. 6420 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S); 6421 if (I != Cache.end()) 6422 return I->second; 6423 6424 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 6425 return setRange(C, SignHint, ConstantRange(C->getAPInt())); 6426 6427 unsigned BitWidth = getTypeSizeInBits(S->getType()); 6428 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 6429 using OBO = OverflowingBinaryOperator; 6430 6431 // If the value has known zeros, the maximum value will have those known zeros 6432 // as well. 
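  // E.g. an i8 value with two known trailing zeros is a multiple of 4, so its
  // unsigned maximum can be clamped down to 0xFC.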
6433 uint32_t TZ = GetMinTrailingZeros(S); 6434 if (TZ != 0) { 6435 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) 6436 ConservativeResult = 6437 ConstantRange(APInt::getMinValue(BitWidth), 6438 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 6439 else 6440 ConservativeResult = ConstantRange( 6441 APInt::getSignedMinValue(BitWidth), 6442 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 6443 } 6444 6445 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 6446 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint); 6447 unsigned WrapType = OBO::AnyWrap; 6448 if (Add->hasNoSignedWrap()) 6449 WrapType |= OBO::NoSignedWrap; 6450 if (Add->hasNoUnsignedWrap()) 6451 WrapType |= OBO::NoUnsignedWrap; 6452 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 6453 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint), 6454 WrapType, RangeType); 6455 return setRange(Add, SignHint, 6456 ConservativeResult.intersectWith(X, RangeType)); 6457 } 6458 6459 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 6460 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint); 6461 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 6462 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint)); 6463 return setRange(Mul, SignHint, 6464 ConservativeResult.intersectWith(X, RangeType)); 6465 } 6466 6467 if (isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) { 6468 Intrinsic::ID ID; 6469 switch (S->getSCEVType()) { 6470 case scUMaxExpr: 6471 ID = Intrinsic::umax; 6472 break; 6473 case scSMaxExpr: 6474 ID = Intrinsic::smax; 6475 break; 6476 case scUMinExpr: 6477 case scSequentialUMinExpr: 6478 ID = Intrinsic::umin; 6479 break; 6480 case scSMinExpr: 6481 ID = Intrinsic::smin; 6482 break; 6483 default: 6484 llvm_unreachable("Unknown SCEVMinMaxExpr/SCEVSequentialMinMaxExpr."); 6485 } 6486 6487 const auto *NAry = cast<SCEVNAryExpr>(S); 6488 ConstantRange X = getRangeRef(NAry->getOperand(0), SignHint); 6489 for (unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i) 6490 X = X.intrinsic(ID, {X, getRangeRef(NAry->getOperand(i), SignHint)}); 6491 return setRange(S, SignHint, 6492 ConservativeResult.intersectWith(X, RangeType)); 6493 } 6494 6495 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 6496 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint); 6497 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint); 6498 return setRange(UDiv, SignHint, 6499 ConservativeResult.intersectWith(X.udiv(Y), RangeType)); 6500 } 6501 6502 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 6503 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint); 6504 return setRange(ZExt, SignHint, 6505 ConservativeResult.intersectWith(X.zeroExtend(BitWidth), 6506 RangeType)); 6507 } 6508 6509 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 6510 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint); 6511 return setRange(SExt, SignHint, 6512 ConservativeResult.intersectWith(X.signExtend(BitWidth), 6513 RangeType)); 6514 } 6515 6516 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) { 6517 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint); 6518 return setRange(PtrToInt, SignHint, X); 6519 } 6520 6521 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 6522 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint); 6523 return setRange(Trunc, SignHint, 6524 ConservativeResult.intersectWith(X.truncate(BitWidth), 6525 RangeType)); 6526 } 6527 6528 if (const 
SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 6529 // If there's no unsigned wrap, the value will never be less than its 6530 // initial value. 6531 if (AddRec->hasNoUnsignedWrap()) { 6532 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart()); 6533 if (!UnsignedMinValue.isZero()) 6534 ConservativeResult = ConservativeResult.intersectWith( 6535 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType); 6536 } 6537 6538 // If there's no signed wrap, and all the operands except initial value have 6539 // the same sign or zero, the value won't ever be: 6540 // 1: smaller than initial value if operands are non negative, 6541 // 2: bigger than initial value if operands are non positive. 6542 // For both cases, value can not cross signed min/max boundary. 6543 if (AddRec->hasNoSignedWrap()) { 6544 bool AllNonNeg = true; 6545 bool AllNonPos = true; 6546 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) { 6547 if (!isKnownNonNegative(AddRec->getOperand(i))) 6548 AllNonNeg = false; 6549 if (!isKnownNonPositive(AddRec->getOperand(i))) 6550 AllNonPos = false; 6551 } 6552 if (AllNonNeg) 6553 ConservativeResult = ConservativeResult.intersectWith( 6554 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()), 6555 APInt::getSignedMinValue(BitWidth)), 6556 RangeType); 6557 else if (AllNonPos) 6558 ConservativeResult = ConservativeResult.intersectWith( 6559 ConstantRange::getNonEmpty( 6560 APInt::getSignedMinValue(BitWidth), 6561 getSignedRangeMax(AddRec->getStart()) + 1), 6562 RangeType); 6563 } 6564 6565 // TODO: non-affine addrec 6566 if (AddRec->isAffine()) { 6567 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop()); 6568 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 6569 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 6570 auto RangeFromAffine = getRangeForAffineAR( 6571 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 6572 BitWidth); 6573 ConservativeResult = 6574 ConservativeResult.intersectWith(RangeFromAffine, RangeType); 6575 6576 auto RangeFromFactoring = getRangeViaFactoring( 6577 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount, 6578 BitWidth); 6579 ConservativeResult = 6580 ConservativeResult.intersectWith(RangeFromFactoring, RangeType); 6581 } 6582 6583 // Now try symbolic BE count and more powerful methods. 6584 if (UseExpensiveRangeSharpening) { 6585 const SCEV *SymbolicMaxBECount = 6586 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop()); 6587 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) && 6588 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth && 6589 AddRec->hasNoSelfWrap()) { 6590 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR( 6591 AddRec, SymbolicMaxBECount, BitWidth, SignHint); 6592 ConservativeResult = 6593 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType); 6594 } 6595 } 6596 } 6597 6598 return setRange(AddRec, SignHint, std::move(ConservativeResult)); 6599 } 6600 6601 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 6602 6603 // Check if the IR explicitly contains !range metadata. 6604 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); 6605 if (MDRange.hasValue()) 6606 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(), 6607 RangeType); 6608 6609 // Use facts about recurrences in the underlying IR. Note that add 6610 // recurrences are AddRecExprs and thus don't hit this path. This 6611 // primarily handles shift recurrences. 
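    // E.g. a header phi of the form
    //   %iv = phi i32 [ %start, %preheader ], [ %iv.shr, %latch ]
    //   %iv.shr = lshr i32 %iv, 1
    // is not an add recurrence, but a range for it can still be derived below.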
6612 auto CR = getRangeForUnknownRecurrence(U); 6613 ConservativeResult = ConservativeResult.intersectWith(CR); 6614 6615 // See if ValueTracking can give us a useful range. 6616 const DataLayout &DL = getDataLayout(); 6617 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 6618 if (Known.getBitWidth() != BitWidth) 6619 Known = Known.zextOrTrunc(BitWidth); 6620 6621 // ValueTracking may be able to compute a tighter result for the number of 6622 // sign bits than for the value of those sign bits. 6623 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); 6624 if (U->getType()->isPointerTy()) { 6625 // If the pointer size is larger than the index size type, this can cause 6626 // NS to be larger than BitWidth. So compensate for this. 6627 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType()); 6628 int ptrIdxDiff = ptrSize - BitWidth; 6629 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff) 6630 NS -= ptrIdxDiff; 6631 } 6632 6633 if (NS > 1) { 6634 // If we know any of the sign bits, we know all of the sign bits. 6635 if (!Known.Zero.getHiBits(NS).isZero()) 6636 Known.Zero.setHighBits(NS); 6637 if (!Known.One.getHiBits(NS).isZero()) 6638 Known.One.setHighBits(NS); 6639 } 6640 6641 if (Known.getMinValue() != Known.getMaxValue() + 1) 6642 ConservativeResult = ConservativeResult.intersectWith( 6643 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1), 6644 RangeType); 6645 if (NS > 1) 6646 ConservativeResult = ConservativeResult.intersectWith( 6647 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 6648 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1), 6649 RangeType); 6650 6651 // A range of Phi is a subset of union of all ranges of its input. 6652 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) { 6653 // Make sure that we do not run over cycled Phis. 6654 if (PendingPhiRanges.insert(Phi).second) { 6655 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false); 6656 for (auto &Op : Phi->operands()) { 6657 auto OpRange = getRangeRef(getSCEV(Op), SignHint); 6658 RangeFromOps = RangeFromOps.unionWith(OpRange); 6659 // No point to continue if we already have a full set. 6660 if (RangeFromOps.isFullSet()) 6661 break; 6662 } 6663 ConservativeResult = 6664 ConservativeResult.intersectWith(RangeFromOps, RangeType); 6665 bool Erased = PendingPhiRanges.erase(Phi); 6666 assert(Erased && "Failed to erase Phi properly?"); 6667 (void) Erased; 6668 } 6669 } 6670 6671 return setRange(U, SignHint, std::move(ConservativeResult)); 6672 } 6673 6674 return setRange(S, SignHint, std::move(ConservativeResult)); 6675 } 6676 6677 // Given a StartRange, Step and MaxBECount for an expression compute a range of 6678 // values that the expression can take. Initially, the expression has a value 6679 // from StartRange and then is changed by Step up to MaxBECount times. Signed 6680 // argument defines if we treat Step as signed or unsigned. 6681 static ConstantRange getRangeForAffineARHelper(APInt Step, 6682 const ConstantRange &StartRange, 6683 const APInt &MaxBECount, 6684 unsigned BitWidth, bool Signed) { 6685 // If either Step or MaxBECount is 0, then the expression won't change, and we 6686 // just need to return the initial range. 6687 if (Step == 0 || MaxBECount == 0) 6688 return StartRange; 6689 6690 // If we don't know anything about the initial value (i.e. StartRange is 6691 // FullRange), then we don't know anything about the final range either. 6692 // Return FullRange. 
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by Offset
  // otherwise. Maximum value of the final range will match the maximal value
  // of StartRange if the expression is decreasing and will be increased by
  // Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return the final [NewLower, NewUpper) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
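  // Both computations are independently correct, so the actual range lies in
  // their intersection; intersecting can only sharpen the final answer.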
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
    const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
    ScalarEvolution::RangeSignHint SignHint) {
  assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
  assert(AddRec->hasNoSelfWrap() &&
         "This only works for non-self-wrapping AddRecs!");
  const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
  const SCEV *Step = AddRec->getStepRecurrence(*this);
  // Only deal with constant step to save compile time.
  if (!isa<SCEVConstant>(Step))
    return ConstantRange::getFull(BitWidth);
  // Let's make sure that we can prove that we do not self-wrap during
  // MaxBECount iterations. We need this because MaxBECount is a maximum
  // iteration count estimate, and we might infer nw from some exit for which
  // we do not know max exit count (or any other side reasoning).
  // TODO: Turn into assert at some point.
  if (getTypeSizeInBits(MaxBECount->getType()) >
      getTypeSizeInBits(AddRec->getType()))
    return ConstantRange::getFull(BitWidth);
  MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
  const SCEV *RangeWidth = getMinusOne(AddRec->getType());
  const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
  const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
  if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
                                         MaxItersWithoutWrap))
    return ConstantRange::getFull(BitWidth);

  ICmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  ICmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);

  // We know that there is no self-wrap. Let's take Start and End values and
  // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
  // the iteration. They either lie inside the range [Min(Start, End),
  // Max(Start, End)] or outside it:
  //
  // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
  // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
  //
  // No self wrap flag guarantees that the intermediate values cannot be BOTH
  // outside and inside the range [Min(Start, End), Max(Start, End)]. Using
  // that knowledge, let's try to prove that we are dealing with Case 1. It is
  // so if Start <= End and step is positive, or Start >= End and step is
  // negative.
  const SCEV *Start = AddRec->getStart();
  ConstantRange StartRange = getRangeRef(Start, SignHint);
  ConstantRange EndRange = getRangeRef(End, SignHint);
  ConstantRange RangeBetween = StartRange.unionWith(EndRange);
  // If they already cover full iteration space, we will know nothing useful
  // even if we prove what we want to prove.
  if (RangeBetween.isFullSet())
    return RangeBetween;
  // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
  bool IsWrappedSet = IsSigned ?
RangeBetween.isSignWrappedSet() 6829 : RangeBetween.isWrappedSet(); 6830 if (IsWrappedSet) 6831 return ConstantRange::getFull(BitWidth); 6832 6833 if (isKnownPositive(Step) && 6834 isKnownPredicateViaConstantRanges(LEPred, Start, End)) 6835 return RangeBetween; 6836 else if (isKnownNegative(Step) && 6837 isKnownPredicateViaConstantRanges(GEPred, Start, End)) 6838 return RangeBetween; 6839 return ConstantRange::getFull(BitWidth); 6840 } 6841 6842 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start, 6843 const SCEV *Step, 6844 const SCEV *MaxBECount, 6845 unsigned BitWidth) { 6846 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q}) 6847 // == RangeOf({A,+,P}) union RangeOf({B,+,Q}) 6848 6849 struct SelectPattern { 6850 Value *Condition = nullptr; 6851 APInt TrueValue; 6852 APInt FalseValue; 6853 6854 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth, 6855 const SCEV *S) { 6856 Optional<unsigned> CastOp; 6857 APInt Offset(BitWidth, 0); 6858 6859 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth && 6860 "Should be!"); 6861 6862 // Peel off a constant offset: 6863 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) { 6864 // In the future we could consider being smarter here and handle 6865 // {Start+Step,+,Step} too. 6866 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0))) 6867 return; 6868 6869 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt(); 6870 S = SA->getOperand(1); 6871 } 6872 6873 // Peel off a cast operation 6874 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) { 6875 CastOp = SCast->getSCEVType(); 6876 S = SCast->getOperand(); 6877 } 6878 6879 using namespace llvm::PatternMatch; 6880 6881 auto *SU = dyn_cast<SCEVUnknown>(S); 6882 const APInt *TrueVal, *FalseVal; 6883 if (!SU || 6884 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal), 6885 m_APInt(FalseVal)))) { 6886 Condition = nullptr; 6887 return; 6888 } 6889 6890 TrueValue = *TrueVal; 6891 FalseValue = *FalseVal; 6892 6893 // Re-apply the cast we peeled off earlier 6894 if (CastOp.hasValue()) 6895 switch (*CastOp) { 6896 default: 6897 llvm_unreachable("Unknown SCEV cast type!"); 6898 6899 case scTruncate: 6900 TrueValue = TrueValue.trunc(BitWidth); 6901 FalseValue = FalseValue.trunc(BitWidth); 6902 break; 6903 case scZeroExtend: 6904 TrueValue = TrueValue.zext(BitWidth); 6905 FalseValue = FalseValue.zext(BitWidth); 6906 break; 6907 case scSignExtend: 6908 TrueValue = TrueValue.sext(BitWidth); 6909 FalseValue = FalseValue.sext(BitWidth); 6910 break; 6911 } 6912 6913 // Re-apply the constant offset we peeled off earlier 6914 TrueValue += Offset; 6915 FalseValue += Offset; 6916 } 6917 6918 bool isRecognized() { return Condition != nullptr; } 6919 }; 6920 6921 SelectPattern StartPattern(*this, BitWidth, Start); 6922 if (!StartPattern.isRecognized()) 6923 return ConstantRange::getFull(BitWidth); 6924 6925 SelectPattern StepPattern(*this, BitWidth, Step); 6926 if (!StepPattern.isRecognized()) 6927 return ConstantRange::getFull(BitWidth); 6928 6929 if (StartPattern.Condition != StepPattern.Condition) { 6930 // We don't handle this case today; but we could, by considering four 6931 // possibilities below instead of two. I'm not sure if there are cases where 6932 // that will help over what getRange already does, though. 6933 return ConstantRange::getFull(BitWidth); 6934 } 6935 6936 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to 6937 // construct arbitrary general SCEV expressions here. 
This function is called 6938 // from deep in the call stack, and calling getSCEV (on a sext instruction, 6939 // say) can end up caching a suboptimal value. 6940 6941 // FIXME: without the explicit `this` receiver below, MSVC errors out with 6942 // C2352 and C2512 (otherwise it isn't needed). 6943 6944 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue); 6945 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue); 6946 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue); 6947 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue); 6948 6949 ConstantRange TrueRange = 6950 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth); 6951 ConstantRange FalseRange = 6952 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth); 6953 6954 return TrueRange.unionWith(FalseRange); 6955 } 6956 6957 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { 6958 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; 6959 const BinaryOperator *BinOp = cast<BinaryOperator>(V); 6960 6961 // Return early if there are no flags to propagate to the SCEV. 6962 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 6963 if (BinOp->hasNoUnsignedWrap()) 6964 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW); 6965 if (BinOp->hasNoSignedWrap()) 6966 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW); 6967 if (Flags == SCEV::FlagAnyWrap) 6968 return SCEV::FlagAnyWrap; 6969 6970 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap; 6971 } 6972 6973 const Instruction * 6974 ScalarEvolution::getNonTrivialDefiningScopeBound(const SCEV *S) { 6975 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) 6976 return &*AddRec->getLoop()->getHeader()->begin(); 6977 if (auto *U = dyn_cast<SCEVUnknown>(S)) 6978 if (auto *I = dyn_cast<Instruction>(U->getValue())) 6979 return I; 6980 return nullptr; 6981 } 6982 6983 /// Fills \p Ops with unique operands of \p S, if it has operands. If not, 6984 /// \p Ops remains unmodified. 6985 static void collectUniqueOps(const SCEV *S, 6986 SmallVectorImpl<const SCEV *> &Ops) { 6987 SmallPtrSet<const SCEV *, 4> Unique; 6988 auto InsertUnique = [&](const SCEV *S) { 6989 if (Unique.insert(S).second) 6990 Ops.push_back(S); 6991 }; 6992 if (auto *S2 = dyn_cast<SCEVCastExpr>(S)) 6993 for (auto *Op : S2->operands()) 6994 InsertUnique(Op); 6995 else if (auto *S2 = dyn_cast<SCEVNAryExpr>(S)) 6996 for (auto *Op : S2->operands()) 6997 InsertUnique(Op); 6998 else if (auto *S2 = dyn_cast<SCEVUDivExpr>(S)) 6999 for (auto *Op : S2->operands()) 7000 InsertUnique(Op); 7001 } 7002 7003 const Instruction * 7004 ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops, 7005 bool &Precise) { 7006 Precise = true; 7007 // Do a bounded search of the def relation of the requested SCEVs. 7008 SmallSet<const SCEV *, 16> Visited; 7009 SmallVector<const SCEV *> Worklist; 7010 auto pushOp = [&](const SCEV *S) { 7011 if (!Visited.insert(S).second) 7012 return; 7013 // Threshold of 30 here is arbitrary. 
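    // Walking further is unlikely to pay for itself in compile time; give up
    // and let the caller know the returned bound is imprecise.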
    if (Visited.size() > 30) {
      Precise = false;
      return;
    }
    Worklist.push_back(S);
  };

  for (auto *S : Ops)
    pushOp(S);

  const Instruction *Bound = nullptr;
  while (!Worklist.empty()) {
    auto *S = Worklist.pop_back_val();
    if (auto *DefI = getNonTrivialDefiningScopeBound(S)) {
      if (!Bound || DT.dominates(Bound, DefI))
        Bound = DefI;
    } else {
      SmallVector<const SCEV *, 4> Ops;
      collectUniqueOps(S, Ops);
      for (auto *Op : Ops)
        pushOp(Op);
    }
  }
  return Bound ? Bound : &*F.getEntryBlock().begin();
}

const Instruction *
ScalarEvolution::getDefiningScopeBound(ArrayRef<const SCEV *> Ops) {
  bool Discard;
  return getDefiningScopeBound(Ops, Discard);
}

bool ScalarEvolution::isGuaranteedToTransferExecutionTo(const Instruction *A,
                                                        const Instruction *B) {
  if (A->getParent() == B->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 B->getIterator()))
    return true;

  auto *BLoop = LI.getLoopFor(B->getParent());
  if (BLoop && BLoop->getHeader() == B->getParent() &&
      BLoop->getLoopPreheader() == A->getParent() &&
      isGuaranteedToTransferExecutionToSuccessor(A->getIterator(),
                                                 A->getParent()->end()) &&
      isGuaranteedToTransferExecutionToSuccessor(B->getParent()->begin(),
                                                 B->getIterator()))
    return true;
  return false;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find an
  // upper bound on the defining scope for the SCEV, and prove that I is
  // executed every time we enter that scope. When the bounding scope is a
  // loop (the common case), this is equivalent to proving I executes on every
  // iteration of that loop.
  SmallVector<const SCEV *> SCEVOps;
  for (const Use &Op : I->operands()) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (isSCEVable(Op->getType()))
      SCEVOps.push_back(getSCEV(Op));
  }
  auto *DefI = getDefiningScopeBound(SCEVOps);
  return isGuaranteedToTransferExecutionTo(DefI, I);
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison, period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison. There are two possibilities (let's call the iteration in which \p
  // I first became poison K):
  //
  // 1. In the set of iterations including and after K, the loop body executes
  //    no side effects. In this case executing the backedge an infinite number
  //    of times will yield undefined behavior.
  //
  // 2. In the set of iterations including and after K, the loop body executes
  //    at least one side effect. In this case, that specific instance of side
  //    effect is control dependent on poison, which also yields undefined
  //    behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison. Only
  // things that are known to be poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesPoison(cast<Operator>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayThrow() || I->mayWriteToMemory();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}

bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
  // A mustprogress loop without side effects must be finite.
  // TODO: The check used here is very conservative.
It's only *specific* 7191 // side effects which are well defined in infinite loops. 7192 return isFinite(L) || (isMustProgress(L) && loopHasNoSideEffects(L)); 7193 } 7194 7195 const SCEV *ScalarEvolution::createSCEV(Value *V) { 7196 if (!isSCEVable(V->getType())) 7197 return getUnknown(V); 7198 7199 if (Instruction *I = dyn_cast<Instruction>(V)) { 7200 // Don't attempt to analyze instructions in blocks that aren't 7201 // reachable. Such instructions don't matter, and they aren't required 7202 // to obey basic rules for definitions dominating uses which this 7203 // analysis depends on. 7204 if (!DT.isReachableFromEntry(I->getParent())) 7205 return getUnknown(UndefValue::get(V->getType())); 7206 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 7207 return getConstant(CI); 7208 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 7209 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); 7210 else if (!isa<ConstantExpr>(V)) 7211 return getUnknown(V); 7212 7213 const SCEV *LHS; 7214 const SCEV *RHS; 7215 7216 Operator *U = cast<Operator>(V); 7217 if (auto BO = MatchBinaryOp(U, DT)) { 7218 switch (BO->Opcode) { 7219 case Instruction::Add: { 7220 // The simple thing to do would be to just call getSCEV on both operands 7221 // and call getAddExpr with the result. However if we're looking at a 7222 // bunch of things all added together, this can be quite inefficient, 7223 // because it leads to N-1 getAddExpr calls for N ultimate operands. 7224 // Instead, gather up all the operands and make a single getAddExpr call. 7225 // LLVM IR canonical form means we need only traverse the left operands. 7226 SmallVector<const SCEV *, 4> AddOps; 7227 do { 7228 if (BO->Op) { 7229 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7230 AddOps.push_back(OpSCEV); 7231 break; 7232 } 7233 7234 // If a NUW or NSW flag can be applied to the SCEV for this 7235 // addition, then compute the SCEV for this addition by itself 7236 // with a separate call to getAddExpr. We need to do that 7237 // instead of pushing the operands of the addition onto AddOps, 7238 // since the flags are only known to apply to this particular 7239 // addition - they may not apply to other additions that can be 7240 // formed with operands from AddOps. 
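          // E.g. given (a + b)<nsw> + c, the nsw fact holds for the inner
          // addition only; it implies nothing about a regrouped (a + c) or
          // (b + c), so it must not be applied to a flattened add.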
7241 const SCEV *RHS = getSCEV(BO->RHS); 7242 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7243 if (Flags != SCEV::FlagAnyWrap) { 7244 const SCEV *LHS = getSCEV(BO->LHS); 7245 if (BO->Opcode == Instruction::Sub) 7246 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags)); 7247 else 7248 AddOps.push_back(getAddExpr(LHS, RHS, Flags)); 7249 break; 7250 } 7251 } 7252 7253 if (BO->Opcode == Instruction::Sub) 7254 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS))); 7255 else 7256 AddOps.push_back(getSCEV(BO->RHS)); 7257 7258 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7259 if (!NewBO || (NewBO->Opcode != Instruction::Add && 7260 NewBO->Opcode != Instruction::Sub)) { 7261 AddOps.push_back(getSCEV(BO->LHS)); 7262 break; 7263 } 7264 BO = NewBO; 7265 } while (true); 7266 7267 return getAddExpr(AddOps); 7268 } 7269 7270 case Instruction::Mul: { 7271 SmallVector<const SCEV *, 4> MulOps; 7272 do { 7273 if (BO->Op) { 7274 if (auto *OpSCEV = getExistingSCEV(BO->Op)) { 7275 MulOps.push_back(OpSCEV); 7276 break; 7277 } 7278 7279 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op); 7280 if (Flags != SCEV::FlagAnyWrap) { 7281 LHS = getSCEV(BO->LHS); 7282 RHS = getSCEV(BO->RHS); 7283 MulOps.push_back(getMulExpr(LHS, RHS, Flags)); 7284 break; 7285 } 7286 } 7287 7288 MulOps.push_back(getSCEV(BO->RHS)); 7289 auto NewBO = MatchBinaryOp(BO->LHS, DT); 7290 if (!NewBO || NewBO->Opcode != Instruction::Mul) { 7291 MulOps.push_back(getSCEV(BO->LHS)); 7292 break; 7293 } 7294 BO = NewBO; 7295 } while (true); 7296 7297 return getMulExpr(MulOps); 7298 } 7299 case Instruction::UDiv: 7300 LHS = getSCEV(BO->LHS); 7301 RHS = getSCEV(BO->RHS); 7302 return getUDivExpr(LHS, RHS); 7303 case Instruction::URem: 7304 LHS = getSCEV(BO->LHS); 7305 RHS = getSCEV(BO->RHS); 7306 return getURemExpr(LHS, RHS); 7307 case Instruction::Sub: { 7308 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; 7309 if (BO->Op) 7310 Flags = getNoWrapFlagsFromUB(BO->Op); 7311 LHS = getSCEV(BO->LHS); 7312 RHS = getSCEV(BO->RHS); 7313 return getMinusSCEV(LHS, RHS, Flags); 7314 } 7315 case Instruction::And: 7316 // For an expression like x&255 that merely masks off the high bits, 7317 // use zext(trunc(x)) as the SCEV expression. 7318 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7319 if (CI->isZero()) 7320 return getSCEV(BO->RHS); 7321 if (CI->isMinusOne()) 7322 return getSCEV(BO->LHS); 7323 const APInt &A = CI->getValue(); 7324 7325 // Instcombine's ShrinkDemandedConstant may strip bits out of 7326 // constants, obscuring what would otherwise be a low-bits mask. 7327 // Use computeKnownBits to compute what ShrinkDemandedConstant 7328 // knew about to reconstruct a low-bits mask value. 7329 unsigned LZ = A.countLeadingZeros(); 7330 unsigned TZ = A.countTrailingZeros(); 7331 unsigned BitWidth = A.getBitWidth(); 7332 KnownBits Known(BitWidth); 7333 computeKnownBits(BO->LHS, Known, getDataLayout(), 7334 0, &AC, nullptr, &DT); 7335 7336 APInt EffectiveMask = 7337 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); 7338 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) { 7339 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ)); 7340 const SCEV *LHS = getSCEV(BO->LHS); 7341 const SCEV *ShiftedLHS = nullptr; 7342 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) { 7343 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) { 7344 // For an expression like (x * 8) & 8, simplify the multiply. 
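            // E.g. in i8, ((x * 8) & 8) folds to
            // (zext (trunc x to i1) to i8) * 8, i.e. bit 0 of x scaled by 8.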
7345 unsigned MulZeros = OpC->getAPInt().countTrailingZeros(); 7346 unsigned GCD = std::min(MulZeros, TZ); 7347 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD); 7348 SmallVector<const SCEV*, 4> MulOps; 7349 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD))); 7350 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end()); 7351 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags()); 7352 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt)); 7353 } 7354 } 7355 if (!ShiftedLHS) 7356 ShiftedLHS = getUDivExpr(LHS, MulCount); 7357 return getMulExpr( 7358 getZeroExtendExpr( 7359 getTruncateExpr(ShiftedLHS, 7360 IntegerType::get(getContext(), BitWidth - LZ - TZ)), 7361 BO->LHS->getType()), 7362 MulCount); 7363 } 7364 } 7365 // Binary `and` is a bit-wise `umin`. 7366 if (BO->LHS->getType()->isIntegerTy(1)) { 7367 LHS = getSCEV(BO->LHS); 7368 RHS = getSCEV(BO->RHS); 7369 return getUMinExpr(LHS, RHS); 7370 } 7371 break; 7372 7373 case Instruction::Or: 7374 // If the RHS of the Or is a constant, we may have something like: 7375 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 7376 // optimizations will transparently handle this case. 7377 // 7378 // In order for this transformation to be safe, the LHS must be of the 7379 // form X*(2^n) and the Or constant must be less than 2^n. 7380 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7381 const SCEV *LHS = getSCEV(BO->LHS); 7382 const APInt &CIVal = CI->getValue(); 7383 if (GetMinTrailingZeros(LHS) >= 7384 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 7385 // Build a plain add SCEV. 7386 return getAddExpr(LHS, getSCEV(CI), 7387 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW)); 7388 } 7389 } 7390 // Binary `or` is a bit-wise `umax`. 7391 if (BO->LHS->getType()->isIntegerTy(1)) { 7392 LHS = getSCEV(BO->LHS); 7393 RHS = getSCEV(BO->RHS); 7394 return getUMaxExpr(LHS, RHS); 7395 } 7396 break; 7397 7398 case Instruction::Xor: 7399 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { 7400 // If the RHS of xor is -1, then this is a not operation. 7401 if (CI->isMinusOne()) 7402 return getNotSCEV(getSCEV(BO->LHS)); 7403 7404 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 7405 // This is a variant of the check for xor with -1, and it handles 7406 // the case where instcombine has trimmed non-demanded bits out 7407 // of an xor with -1. 7408 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) 7409 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) 7410 if (LBO->getOpcode() == Instruction::And && 7411 LCI->getValue() == CI->getValue()) 7412 if (const SCEVZeroExtendExpr *Z = 7413 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { 7414 Type *UTy = BO->LHS->getType(); 7415 const SCEV *Z0 = Z->getOperand(); 7416 Type *Z0Ty = Z0->getType(); 7417 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 7418 7419 // If C is a low-bits mask, the zero extend is serving to 7420 // mask off the high bits. Complement the operand and 7421 // re-apply the zext. 7422 if (CI->getValue().isMask(Z0TySize)) 7423 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 7424 7425 // If C is a single bit, it may be in the sign-bit position 7426 // before the zero-extend. In this case, represent the xor 7427 // using an add, which is equivalent, and re-apply the zext. 
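                // E.g. in i8, (xor x, 0x80) and (add x, 0x80) agree for all
                // x: adding the sign mask sets the sign bit when it was clear
                // and wraps it back to zero when it was set, just like xor.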
                APInt Trunc = CI->getValue().trunc(Z0TySize);
                if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                    Trunc.isSignMask())
                  return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                           UTy);
              }
      }
      break;

    case Instruction::Shl:
      // Turn shift left of a constant amount into a multiply.
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op) {
          auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
          if ((MulFlags & SCEV::FlagNSW) &&
              ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
          if (MulFlags & SCEV::FlagNUW)
            Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
        }

        ConstantInt *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getConstant(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < BitWidth - AShrAmt.
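            // E.g. for i32, ((x shl 4) ashr 1) becomes
            // sext(mul(trunc(x to i31), 8) to i32).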
7511 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt, 7512 ShlAmt - AShrAmt); 7513 return getSignExtendExpr( 7514 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy), 7515 getConstant(Mul)), OuterTy); 7516 } 7517 } 7518 } 7519 break; 7520 } 7521 } 7522 } 7523 7524 switch (U->getOpcode()) { 7525 case Instruction::Trunc: 7526 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 7527 7528 case Instruction::ZExt: 7529 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 7530 7531 case Instruction::SExt: 7532 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) { 7533 // The NSW flag of a subtract does not always survive the conversion to 7534 // A + (-1)*B. By pushing sign extension onto its operands we are much 7535 // more likely to preserve NSW and allow later AddRec optimisations. 7536 // 7537 // NOTE: This is effectively duplicating this logic from getSignExtend: 7538 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw> 7539 // but by that point the NSW information has potentially been lost. 7540 if (BO->Opcode == Instruction::Sub && BO->IsNSW) { 7541 Type *Ty = U->getType(); 7542 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty); 7543 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty); 7544 return getMinusSCEV(V1, V2, SCEV::FlagNSW); 7545 } 7546 } 7547 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 7548 7549 case Instruction::BitCast: 7550 // BitCasts are no-op casts so we just eliminate the cast. 7551 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 7552 return getSCEV(U->getOperand(0)); 7553 break; 7554 7555 case Instruction::PtrToInt: { 7556 // Pointer to integer cast is straightforward, so we do model it. 7557 const SCEV *Op = getSCEV(U->getOperand(0)); 7558 Type *DstIntTy = U->getType(); 7559 // But only if the effective SCEV (integer) type is wide enough to represent 7560 // all possible pointer values. 7561 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy); 7562 if (isa<SCEVCouldNotCompute>(IntOp)) 7563 return getUnknown(V); 7564 return IntOp; 7565 } 7566 case Instruction::IntToPtr: 7567 // Just don't deal with inttoptr casts. 7568 return getUnknown(V); 7569 7570 case Instruction::SDiv: 7571 // If both operands are non-negative, this is just a udiv. 7572 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 7573 isKnownNonNegative(getSCEV(U->getOperand(1)))) 7574 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 7575 break; 7576 7577 case Instruction::SRem: 7578 // If both operands are non-negative, this is just a urem.
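// For illustration (hypothetical operands): if %a and %b are both known non-negative, "srem i32 %a, %b" computes the same value as "urem i32 %a, %b", so modeling it with getURemExpr below loses nothing; the sdiv case above is the analogous reduction to udiv.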
7579 if (isKnownNonNegative(getSCEV(U->getOperand(0))) && 7580 isKnownNonNegative(getSCEV(U->getOperand(1)))) 7581 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1))); 7582 break; 7583 7584 case Instruction::GetElementPtr: 7585 return createNodeForGEP(cast<GEPOperator>(U)); 7586 7587 case Instruction::PHI: 7588 return createNodeForPHI(cast<PHINode>(U)); 7589 7590 case Instruction::Select: 7591 return createNodeForSelectOrPHI(U, U->getOperand(0), U->getOperand(1), 7592 U->getOperand(2)); 7593 7594 case Instruction::Call: 7595 case Instruction::Invoke: 7596 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) 7597 return getSCEV(RV); 7598 7599 if (auto *II = dyn_cast<IntrinsicInst>(U)) { 7600 switch (II->getIntrinsicID()) { 7601 case Intrinsic::abs: 7602 return getAbsExpr( 7603 getSCEV(II->getArgOperand(0)), 7604 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne()); 7605 case Intrinsic::umax: 7606 LHS = getSCEV(II->getArgOperand(0)); 7607 RHS = getSCEV(II->getArgOperand(1)); 7608 return getUMaxExpr(LHS, RHS); 7609 case Intrinsic::umin: 7610 LHS = getSCEV(II->getArgOperand(0)); 7611 RHS = getSCEV(II->getArgOperand(1)); 7612 return getUMinExpr(LHS, RHS); 7613 case Intrinsic::smax: 7614 LHS = getSCEV(II->getArgOperand(0)); 7615 RHS = getSCEV(II->getArgOperand(1)); 7616 return getSMaxExpr(LHS, RHS); 7617 case Intrinsic::smin: 7618 LHS = getSCEV(II->getArgOperand(0)); 7619 RHS = getSCEV(II->getArgOperand(1)); 7620 return getSMinExpr(LHS, RHS); 7621 case Intrinsic::usub_sat: { 7622 const SCEV *X = getSCEV(II->getArgOperand(0)); 7623 const SCEV *Y = getSCEV(II->getArgOperand(1)); 7624 const SCEV *ClampedY = getUMinExpr(X, Y); 7625 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW); 7626 } 7627 case Intrinsic::uadd_sat: { 7628 const SCEV *X = getSCEV(II->getArgOperand(0)); 7629 const SCEV *Y = getSCEV(II->getArgOperand(1)); 7630 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y)); 7631 return getAddExpr(ClampedX, Y, SCEV::FlagNUW); 7632 } 7633 case Intrinsic::start_loop_iterations: 7634 // A start_loop_iterations is just equivalent to the first operand for 7635 // SCEV purposes. 7636 return getSCEV(II->getArgOperand(0)); 7637 default: 7638 break; 7639 } 7640 } 7641 break; 7642 } 7643 7644 return getUnknown(V); 7645 } 7646 7647 //===----------------------------------------------------------------------===// 7648 // Iteration Count Computation Code 7649 // 7650 7651 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount, 7652 bool Extend) { 7653 if (isa<SCEVCouldNotCompute>(ExitCount)) 7654 return getCouldNotCompute(); 7655 7656 auto *ExitCountType = ExitCount->getType(); 7657 assert(ExitCountType->isIntegerTy()); 7658 7659 if (!Extend) 7660 return getAddExpr(ExitCount, getOne(ExitCountType)); 7661 7662 auto *WiderType = Type::getIntNTy(ExitCountType->getContext(), 7663 1 + ExitCountType->getScalarSizeInBits()); 7664 return getAddExpr(getNoopOrZeroExtend(ExitCount, WiderType), 7665 getOne(WiderType)); 7666 } 7667 7668 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) { 7669 if (!ExitCount) 7670 return 0; 7671 7672 ConstantInt *ExitConst = ExitCount->getValue(); 7673 7674 // Guard against huge trip counts. 7675 if (ExitConst->getValue().getActiveBits() > 32) 7676 return 0; 7677 7678 // In case of integer overflow, this returns 0, which is correct. 
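// For illustration (hypothetical counts): a backedge-taken count of 7 yields a trip count of 8, while a count of 0xFFFFFFFF (active bits == 32, so it passes the guard above) wraps the addition below to 0, the same value used for "unknown or huge" trip counts.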
7679 return ((unsigned)ExitConst->getZExtValue()) + 1; 7680 } 7681 7682 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) { 7683 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact)); 7684 return getConstantTripCount(ExitCount); 7685 } 7686 7687 unsigned 7688 ScalarEvolution::getSmallConstantTripCount(const Loop *L, 7689 const BasicBlock *ExitingBlock) { 7690 assert(ExitingBlock && "Must pass a non-null exiting block!"); 7691 assert(L->isLoopExiting(ExitingBlock) && 7692 "Exiting block must actually branch out of the loop!"); 7693 const SCEVConstant *ExitCount = 7694 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); 7695 return getConstantTripCount(ExitCount); 7696 } 7697 7698 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) { 7699 const auto *MaxExitCount = 7700 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L)); 7701 return getConstantTripCount(MaxExitCount); 7702 } 7703 7704 const SCEV *ScalarEvolution::getConstantMaxTripCountFromArray(const Loop *L) { 7705 // We cannot infer a trip count from array accesses in an irregular loop. 7706 // FIXME: It's hard to infer the loop bound from arrays accessed in nested loops. 7707 if (!L->isLoopSimplifyForm() || !L->isInnermost()) 7708 return getCouldNotCompute(); 7709 7710 // FIXME: To keep the analysis simple, we only analyze loops that have a 7711 // single exiting block, and that block must be the latch. This makes it 7712 // easier to capture loops whose memory accesses are guaranteed to execute 7713 // on each iteration. 7714 const BasicBlock *LoopLatch = L->getLoopLatch(); 7715 assert(LoopLatch && "See definition of loop-simplify form."); 7716 if (L->getExitingBlock() != LoopLatch) 7717 return getCouldNotCompute(); 7718 7719 const DataLayout &DL = getDataLayout(); 7720 SmallVector<const SCEV *> InferCountColl; 7721 for (auto *BB : L->getBlocks()) { 7722 // At this point we know the loop is single-exiting and in simplified 7723 // form. Make sure that any memory operation we infer from must be 7724 // executed on each iteration; then the maximum execution count of 7725 // MemAccessBB in the loop also bounds the maximum execution count of the latch. 7726 // If MemAccessBB does not dominate the latch, skip it. 7727 // Entry 7728 // │ 7729 // ┌─────▼─────┐ 7730 // │Loop Header◄─────┐ 7731 // └──┬──────┬─┘ │ 7732 // │ │ │ 7733 // ┌────────▼──┐ ┌─▼─────┐ │ 7734 // │MemAccessBB│ │OtherBB│ │ 7735 // └────────┬──┘ └─┬─────┘ │ 7736 // │ │ │ 7737 // ┌─▼──────▼─┐ │ 7738 // │Loop Latch├─────┘ 7739 // └────┬─────┘ 7740 // ▼ 7741 // Exit 7742 if (!DT.dominates(BB, LoopLatch)) 7743 continue; 7744 7745 for (Instruction &Inst : *BB) { 7746 // Find Memory Operation Instruction. 7747 auto *GEP = getLoadStorePointerOperand(&Inst); 7748 if (!GEP) 7749 continue; 7750 7751 auto *ElemSize = dyn_cast<SCEVConstant>(getElementSize(&Inst)); 7752 // Do not infer from a scalar type, e.g. "ElemSize = sizeof()". 7753 if (!ElemSize) 7754 continue; 7755 7756 // Use an existing polynomial recurrence on the trip count. 7757 auto *AddRec = dyn_cast<SCEVAddRecExpr>(getSCEV(GEP)); 7758 if (!AddRec) 7759 continue; 7760 auto *ArrBase = dyn_cast<SCEVUnknown>(getPointerBase(AddRec)); 7761 auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*this)); 7762 if (!ArrBase || !Step) 7763 continue; 7764 assert(isLoopInvariant(ArrBase, L) && "See addrec definition"); 7765 7766 // Only handle the {%array,+,step} form; 7767 // FIXME: {(SCEVAddRecExpr),+,step} cannot be analyzed here.
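// For illustration (a hypothetical case): with "%arr = alloca [16 x i32]" (64 bytes) and a GEP whose SCEV is {%arr,+,4}<%loop> where ElemSize == Step == 4, the checks below accept the access, MemSize/Step gives MaxExeCount = 64 / 4 = 16, and the inferred trip count is 16 + 1 = 17.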
7768 if (AddRec->getStart() != ArrBase) 7769 continue; 7770 7771 // Skip memory access patterns that have gaps, that repeat a memory 7772 // operation, or whose GEP index wraps around, since the inference 7773 // below is not valid for them. 7774 if (Step->getAPInt().getActiveBits() > 32 || 7775 Step->getAPInt().getZExtValue() != 7776 ElemSize->getAPInt().getZExtValue() || 7777 Step->isZero() || Step->getAPInt().isNegative()) 7778 continue; 7779 7780 // Only infer from a stack array that has a known size. 7781 // Make sure the alloca instruction is not executed inside the loop. 7782 AllocaInst *AllocateInst = dyn_cast<AllocaInst>(ArrBase->getValue()); 7783 if (!AllocateInst || L->contains(AllocateInst->getParent())) 7784 continue; 7785 7786 // Make sure we only handle a normal array. 7787 auto *Ty = dyn_cast<ArrayType>(AllocateInst->getAllocatedType()); 7788 auto *ArrSize = dyn_cast<ConstantInt>(AllocateInst->getArraySize()); 7789 if (!Ty || !ArrSize || !ArrSize->isOne()) 7790 continue; 7791 7792 // FIXME: Since GEP indices are silently zext'd to the indexing type, 7793 // a narrow GEP index may wrap around rather than 7794 // increase strictly; we should ensure that the step increases 7795 // strictly on each loop iteration. 7796 // Now we can infer a max execution count from MemLength/StepLength. 7797 const SCEV *MemSize = 7798 getConstant(Step->getType(), DL.getTypeAllocSize(Ty)); 7799 auto *MaxExeCount = 7800 dyn_cast<SCEVConstant>(getUDivCeilSCEV(MemSize, Step)); 7801 if (!MaxExeCount || MaxExeCount->getAPInt().getActiveBits() > 32) 7802 continue; 7803 7804 // If the loop reaches the maximum number of executions, accessing 7805 // bytes starting outside the statically allocated size would be 7806 // immediate UB; but it is still allowed to enter the loop header one 7807 // more time. 7808 auto *InferCount = dyn_cast<SCEVConstant>( 7809 getAddExpr(MaxExeCount, getOne(MaxExeCount->getType()))); 7810 // Discard inferred counts that do not fit in 32 bits. 7811 if (!InferCount || InferCount->getAPInt().getActiveBits() > 32) 7812 continue; 7813 7814 InferCountColl.push_back(InferCount); 7815 } 7816 } 7817 7818 if (InferCountColl.empty()) 7819 return getCouldNotCompute(); 7820 7821 return getUMinFromMismatchedTypes(InferCountColl); 7822 } 7823 7824 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) { 7825 SmallVector<BasicBlock *, 8> ExitingBlocks; 7826 L->getExitingBlocks(ExitingBlocks); 7827 7828 Optional<unsigned> Res = None; 7829 for (auto *ExitingBB : ExitingBlocks) { 7830 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB); 7831 if (!Res) 7832 Res = Multiple; 7833 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple); 7834 } 7835 return Res.getValueOr(1); 7836 } 7837 7838 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 7839 const SCEV *ExitCount) { 7840 if (ExitCount == getCouldNotCompute()) 7841 return 1; 7842 7843 // Get the trip count. 7844 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount); 7845 7846 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr); 7847 if (!TC) 7848 // Attempt to factor more general cases. Returns the greatest power of 7849 // two divisor. If overflow happens, the trip count expression is still 7850 // divisible by the greatest power of 2 divisor returned.
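// For illustration (a hypothetical count): if TCExpr is (8 * %n), it has at least 3 trailing zero bits, so the expression below reports a multiple of 1 << 3 = 8 even though the exact trip count is unknown.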
7851 return 1U << std::min((uint32_t)31, 7852 GetMinTrailingZeros(applyLoopGuards(TCExpr, L))); 7853 7854 ConstantInt *Result = TC->getValue(); 7855 7856 // Guard against huge trip counts (this requires checking 7857 // for zero to handle the case where the trip count == -1 and the 7858 // addition wraps). 7859 if (!Result || Result->getValue().getActiveBits() > 32 || 7860 Result->getValue().getActiveBits() == 0) 7861 return 1; 7862 7863 return (unsigned)Result->getZExtValue(); 7864 } 7865 7866 /// Returns the largest constant divisor of the trip count of this loop as a 7867 /// normal unsigned value, if possible. This means that the actual trip count is 7868 /// always a multiple of the returned value (don't forget the trip count could 7869 /// very well be zero as well!). 7870 /// 7871 /// Returns 1 if the trip count is unknown or not guaranteed to be a 7872 /// multiple of a constant (which is also the case if the trip count is simply 7873 /// a constant; use getSmallConstantTripCount for that case). It will also 7874 /// return 1 if the trip count is very large (>= 2^32). 7875 /// 7876 /// As explained in the comments for getSmallConstantTripCount, this assumes 7877 /// that control exits the loop via ExitingBlock. 7878 unsigned 7879 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L, 7880 const BasicBlock *ExitingBlock) { 7881 assert(ExitingBlock && "Must pass a non-null exiting block!"); 7882 assert(L->isLoopExiting(ExitingBlock) && 7883 "Exiting block must actually branch out of the loop!"); 7884 const SCEV *ExitCount = getExitCount(L, ExitingBlock); 7885 return getSmallConstantTripMultiple(L, ExitCount); 7886 } 7887 7888 const SCEV *ScalarEvolution::getExitCount(const Loop *L, 7889 const BasicBlock *ExitingBlock, 7890 ExitCountKind Kind) { 7891 switch (Kind) { 7892 case Exact: 7893 case SymbolicMaximum: 7894 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this); 7895 case ConstantMaximum: 7896 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this); 7897 } 7898 llvm_unreachable("Invalid ExitCountKind!"); 7899 } 7900 7901 const SCEV * 7902 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L, 7903 SmallVector<const SCEVPredicate *, 4> &Preds) { 7904 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds); 7905 } 7906 7907 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L, 7908 ExitCountKind Kind) { 7909 switch (Kind) { 7910 case Exact: 7911 return getBackedgeTakenInfo(L).getExact(L, this); 7912 case ConstantMaximum: 7913 return getBackedgeTakenInfo(L).getConstantMax(this); 7914 case SymbolicMaximum: 7915 return getBackedgeTakenInfo(L).getSymbolicMax(L, this); 7916 } 7917 llvm_unreachable("Invalid ExitCountKind!"); 7918 } 7919 7920 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) { 7921 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this); 7922 } 7923 7924 /// Push PHI nodes in the header of the given loop onto the given Worklist. 7925 static void PushLoopPHIs(const Loop *L, 7926 SmallVectorImpl<Instruction *> &Worklist, 7927 SmallPtrSetImpl<Instruction *> &Visited) { 7928 BasicBlock *Header = L->getHeader(); 7929 7930 // Push all Loop-header PHIs onto the Worklist stack.
7931 for (PHINode &PN : Header->phis()) 7932 if (Visited.insert(&PN).second) 7933 Worklist.push_back(&PN); 7934 } 7935 7936 const ScalarEvolution::BackedgeTakenInfo & 7937 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) { 7938 auto &BTI = getBackedgeTakenInfo(L); 7939 if (BTI.hasFullInfo()) 7940 return BTI; 7941 7942 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 7943 7944 if (!Pair.second) 7945 return Pair.first->second; 7946 7947 BackedgeTakenInfo Result = 7948 computeBackedgeTakenCount(L, /*AllowPredicates=*/true); 7949 7950 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result); 7951 } 7952 7953 ScalarEvolution::BackedgeTakenInfo & 7954 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 7955 // Initially insert an invalid entry for this loop. If the insertion 7956 // succeeds, proceed to actually compute a backedge-taken count and 7957 // update the value. The temporary CouldNotCompute value tells SCEV 7958 // code elsewhere that it shouldn't attempt to request a new 7959 // backedge-taken count, which could result in infinite recursion. 7960 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 7961 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()}); 7962 if (!Pair.second) 7963 return Pair.first->second; 7964 7965 // computeBackedgeTakenCount may allocate memory for its result. Inserting it 7966 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result 7967 // must be cleared in this scope. 7968 BackedgeTakenInfo Result = computeBackedgeTakenCount(L); 7969 7970 // In builds without statistics these counters are unused; cast them to void to avoid unused-variable warnings. 7971 (void)NumTripCountsComputed; 7972 (void)NumTripCountsNotComputed; 7973 #if LLVM_ENABLE_STATS || !defined(NDEBUG) 7974 const SCEV *BEExact = Result.getExact(L, this); 7975 if (BEExact != getCouldNotCompute()) { 7976 assert(isLoopInvariant(BEExact, L) && 7977 isLoopInvariant(Result.getConstantMax(this), L) && 7978 "Computed backedge-taken count isn't loop invariant for loop!"); 7979 ++NumTripCountsComputed; 7980 } else if (Result.getConstantMax(this) == getCouldNotCompute() && 7981 isa<PHINode>(L->getHeader()->begin())) { 7982 // Only count loops that have phi nodes as not being computable. 7983 ++NumTripCountsNotComputed; 7984 } 7985 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG) 7986 7987 // Now that we know more about the trip count for this loop, forget any 7988 // existing SCEV values for PHI nodes in this loop since they are only 7989 // conservative estimates made without the benefit of trip count 7990 // information. This invalidation is not necessary for correctness, and is 7991 // only done to produce more precise results. 7992 if (Result.hasAnyInfo()) { 7993 // Invalidate any expression using an addrec in this loop. 7994 SmallVector<const SCEV *, 8> ToForget; 7995 auto LoopUsersIt = LoopUsers.find(L); 7996 if (LoopUsersIt != LoopUsers.end()) 7997 append_range(ToForget, LoopUsersIt->second); 7998 forgetMemoizedResults(ToForget); 7999 8000 // Invalidate constant-evolved loop header phis. 8001 for (PHINode &PN : L->getHeader()->phis()) 8002 ConstantEvolutionLoopExitValue.erase(&PN); 8003 } 8004 8005 // Re-lookup the insert position, since the call to 8006 // computeBackedgeTakenCount above could result in a 8007 // recursive call to getBackedgeTakenInfo (on a different 8008 // loop), which would invalidate the iterator computed 8009 // earlier.
8010 return BackedgeTakenCounts.find(L)->second = std::move(Result); 8011 } 8012 8013 void ScalarEvolution::forgetAllLoops() { 8014 // This method is intended to forget all info about loops. It should 8015 // invalidate caches as if the following happened: 8016 // - The trip counts of all loops have changed arbitrarily 8017 // - Every llvm::Value has been updated in place to produce a different 8018 // result. 8019 BackedgeTakenCounts.clear(); 8020 PredicatedBackedgeTakenCounts.clear(); 8021 BECountUsers.clear(); 8022 LoopPropertiesCache.clear(); 8023 ConstantEvolutionLoopExitValue.clear(); 8024 ValueExprMap.clear(); 8025 ValuesAtScopes.clear(); 8026 ValuesAtScopesUsers.clear(); 8027 LoopDispositions.clear(); 8028 BlockDispositions.clear(); 8029 UnsignedRanges.clear(); 8030 SignedRanges.clear(); 8031 ExprValueMap.clear(); 8032 HasRecMap.clear(); 8033 MinTrailingZerosCache.clear(); 8034 PredicatedSCEVRewrites.clear(); 8035 } 8036 8037 void ScalarEvolution::forgetLoop(const Loop *L) { 8038 SmallVector<const Loop *, 16> LoopWorklist(1, L); 8039 SmallVector<Instruction *, 32> Worklist; 8040 SmallPtrSet<Instruction *, 16> Visited; 8041 SmallVector<const SCEV *, 16> ToForget; 8042 8043 // Iterate over all the loops and sub-loops to drop SCEV information. 8044 while (!LoopWorklist.empty()) { 8045 auto *CurrL = LoopWorklist.pop_back_val(); 8046 8047 // Drop any stored trip count value. 8048 forgetBackedgeTakenCounts(CurrL, /* Predicated */ false); 8049 forgetBackedgeTakenCounts(CurrL, /* Predicated */ true); 8050 8051 // Drop information about predicated SCEV rewrites for this loop. 8052 for (auto I = PredicatedSCEVRewrites.begin(); 8053 I != PredicatedSCEVRewrites.end();) { 8054 std::pair<const SCEV *, const Loop *> Entry = I->first; 8055 if (Entry.second == CurrL) 8056 PredicatedSCEVRewrites.erase(I++); 8057 else 8058 ++I; 8059 } 8060 8061 auto LoopUsersItr = LoopUsers.find(CurrL); 8062 if (LoopUsersItr != LoopUsers.end()) { 8063 ToForget.insert(ToForget.end(), LoopUsersItr->second.begin(), 8064 LoopUsersItr->second.end()); 8065 } 8066 8067 // Drop information about expressions based on loop-header PHIs. 8068 PushLoopPHIs(CurrL, Worklist, Visited); 8069 8070 while (!Worklist.empty()) { 8071 Instruction *I = Worklist.pop_back_val(); 8072 8073 ValueExprMapType::iterator It = 8074 ValueExprMap.find_as(static_cast<Value *>(I)); 8075 if (It != ValueExprMap.end()) { 8076 eraseValueFromMap(It->first); 8077 ToForget.push_back(It->second); 8078 if (PHINode *PN = dyn_cast<PHINode>(I)) 8079 ConstantEvolutionLoopExitValue.erase(PN); 8080 } 8081 8082 PushDefUseChildren(I, Worklist, Visited); 8083 } 8084 8085 LoopPropertiesCache.erase(CurrL); 8086 // Forget all contained loops too, to avoid dangling entries in the 8087 // ValuesAtScopes map. 8088 LoopWorklist.append(CurrL->begin(), CurrL->end()); 8089 } 8090 forgetMemoizedResults(ToForget); 8091 } 8092 8093 void ScalarEvolution::forgetTopmostLoop(const Loop *L) { 8094 while (Loop *Parent = L->getParentLoop()) 8095 L = Parent; 8096 forgetLoop(L); 8097 } 8098 8099 void ScalarEvolution::forgetValue(Value *V) { 8100 Instruction *I = dyn_cast<Instruction>(V); 8101 if (!I) return; 8102 8103 // Drop information about expressions based on loop-header PHIs. 
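// For illustration (hypothetical IR): forgetting a value %a also walks its def-use children, so a cached SCEV for "%b = mul i32 %a, 2" is dropped as well, since %b's expression was built from %a's.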
8104 SmallVector<Instruction *, 16> Worklist; 8105 SmallPtrSet<Instruction *, 8> Visited; 8106 SmallVector<const SCEV *, 8> ToForget; 8107 Worklist.push_back(I); 8108 Visited.insert(I); 8109 8110 while (!Worklist.empty()) { 8111 I = Worklist.pop_back_val(); 8112 ValueExprMapType::iterator It = 8113 ValueExprMap.find_as(static_cast<Value *>(I)); 8114 if (It != ValueExprMap.end()) { 8115 eraseValueFromMap(It->first); 8116 ToForget.push_back(It->second); 8117 if (PHINode *PN = dyn_cast<PHINode>(I)) 8118 ConstantEvolutionLoopExitValue.erase(PN); 8119 } 8120 8121 PushDefUseChildren(I, Worklist, Visited); 8122 } 8123 forgetMemoizedResults(ToForget); 8124 } 8125 8126 void ScalarEvolution::forgetLoopDispositions(const Loop *L) { 8127 LoopDispositions.clear(); 8128 } 8129 8130 /// Get the exact loop backedge taken count considering all loop exits. A 8131 /// computable result can only be returned for loops with all exiting blocks 8132 /// dominating the latch. howFarToZero assumes that the limit of each loop test 8133 /// is never skipped. This is a valid assumption as long as the loop exits via 8134 /// that test. For precise results, it is the caller's responsibility to specify 8135 /// the relevant loop exiting block using getExact(ExitingBlock, SE). 8136 const SCEV * 8137 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE, 8138 SmallVector<const SCEVPredicate *, 4> *Preds) const { 8139 // If any exits were not computable, the loop is not computable. 8140 if (!isComplete() || ExitNotTaken.empty()) 8141 return SE->getCouldNotCompute(); 8142 8143 const BasicBlock *Latch = L->getLoopLatch(); 8144 // All exiting blocks we have collected must dominate the only backedge. 8145 if (!Latch) 8146 return SE->getCouldNotCompute(); 8147 8148 // All exiting blocks we have gathered dominate loop's latch, so exact trip 8149 // count is simply a minimum out of all these calculated exit counts. 8150 SmallVector<const SCEV *, 2> Ops; 8151 for (auto &ENT : ExitNotTaken) { 8152 const SCEV *BECount = ENT.ExactNotTaken; 8153 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!"); 8154 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) && 8155 "We should only have known counts for exiting blocks that dominate " 8156 "latch!"); 8157 8158 Ops.push_back(BECount); 8159 8160 if (Preds) 8161 for (auto *P : ENT.Predicates) 8162 Preds->push_back(P); 8163 8164 assert((Preds || ENT.hasAlwaysTruePredicate()) && 8165 "Predicate should be always true!"); 8166 } 8167 8168 return SE->getUMinFromMismatchedTypes(Ops); 8169 } 8170 8171 /// Get the exact not taken count for this loop exit. 8172 const SCEV * 8173 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock, 8174 ScalarEvolution *SE) const { 8175 for (auto &ENT : ExitNotTaken) 8176 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 8177 return ENT.ExactNotTaken; 8178 8179 return SE->getCouldNotCompute(); 8180 } 8181 8182 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax( 8183 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const { 8184 for (auto &ENT : ExitNotTaken) 8185 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate()) 8186 return ENT.MaxNotTaken; 8187 8188 return SE->getCouldNotCompute(); 8189 } 8190 8191 /// getConstantMax - Get the constant max backedge taken count for the loop. 
8192 const SCEV * 8193 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const { 8194 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8195 return !ENT.hasAlwaysTruePredicate(); 8196 }; 8197 8198 if (!getConstantMax() || any_of(ExitNotTaken, PredicateNotAlwaysTrue)) 8199 return SE->getCouldNotCompute(); 8200 8201 assert((isa<SCEVCouldNotCompute>(getConstantMax()) || 8202 isa<SCEVConstant>(getConstantMax())) && 8203 "No point in having a non-constant max backedge taken count!"); 8204 return getConstantMax(); 8205 } 8206 8207 const SCEV * 8208 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L, 8209 ScalarEvolution *SE) { 8210 if (!SymbolicMax) 8211 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L); 8212 return SymbolicMax; 8213 } 8214 8215 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero( 8216 ScalarEvolution *SE) const { 8217 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) { 8218 return !ENT.hasAlwaysTruePredicate(); 8219 }; 8220 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue); 8221 } 8222 8223 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E) 8224 : ExitLimit(E, E, false, None) { 8225 } 8226 8227 ScalarEvolution::ExitLimit::ExitLimit( 8228 const SCEV *E, const SCEV *M, bool MaxOrZero, 8229 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList) 8230 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) { 8231 // If we prove the max count is zero, so is the symbolic bound. This happens 8232 // in practice due to differences in a) how context sensitive we've chosen 8233 // to be and b) how we reason about bounds implied by UB. 8234 if (MaxNotTaken->isZero()) 8235 ExactNotTaken = MaxNotTaken; 8236 8237 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) || 8238 !isa<SCEVCouldNotCompute>(MaxNotTaken)) && 8239 "Exact is not allowed to be less precise than Max"); 8240 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) || 8241 isa<SCEVConstant>(MaxNotTaken)) && 8242 "No point in having a non-constant max backedge taken count!"); 8243 for (auto *PredSet : PredSetList) 8244 for (auto *P : *PredSet) 8245 addPredicate(P); 8246 assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) && 8247 "Backedge count should be int"); 8248 assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) && 8249 "Max backedge count should be int"); 8250 } 8251 8252 ScalarEvolution::ExitLimit::ExitLimit( 8253 const SCEV *E, const SCEV *M, bool MaxOrZero, 8254 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet) 8255 : ExitLimit(E, M, MaxOrZero, {&PredSet}) { 8256 } 8257 8258 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M, 8259 bool MaxOrZero) 8260 : ExitLimit(E, M, MaxOrZero, None) { 8261 } 8262 8263 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each 8264 /// computable exit into a persistent ExitNotTakenInfo array.
8265 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( 8266 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts, 8267 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero) 8268 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) { 8269 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8270 8271 ExitNotTaken.reserve(ExitCounts.size()); 8272 std::transform( 8273 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken), 8274 [&](const EdgeExitInfo &EEI) { 8275 BasicBlock *ExitBB = EEI.first; 8276 const ExitLimit &EL = EEI.second; 8277 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken, 8278 EL.Predicates); 8279 }); 8280 assert((isa<SCEVCouldNotCompute>(ConstantMax) || 8281 isa<SCEVConstant>(ConstantMax)) && 8282 "No point in having a non-constant max backedge taken count!"); 8283 } 8284 8285 /// Compute the number of times the backedge of the specified loop will execute. 8286 ScalarEvolution::BackedgeTakenInfo 8287 ScalarEvolution::computeBackedgeTakenCount(const Loop *L, 8288 bool AllowPredicates) { 8289 SmallVector<BasicBlock *, 8> ExitingBlocks; 8290 L->getExitingBlocks(ExitingBlocks); 8291 8292 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; 8293 8294 SmallVector<EdgeExitInfo, 4> ExitCounts; 8295 bool CouldComputeBECount = true; 8296 BasicBlock *Latch = L->getLoopLatch(); // may be NULL. 8297 const SCEV *MustExitMaxBECount = nullptr; 8298 const SCEV *MayExitMaxBECount = nullptr; 8299 bool MustExitMaxOrZero = false; 8300 8301 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts 8302 // and compute maxBECount. 8303 // Do a union of all the predicates here. 8304 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 8305 BasicBlock *ExitBB = ExitingBlocks[i]; 8306 8307 // We canonicalize untaken exits to br (constant); ignore them so that 8308 // proving an exit untaken doesn't negatively impact our ability to reason 8309 // about the loop as a whole. 8310 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator())) 8311 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) { 8312 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 8313 if (ExitIfTrue == CI->isZero()) 8314 continue; 8315 } 8316 8317 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates); 8318 8319 assert((AllowPredicates || EL.Predicates.empty()) && 8320 "Predicated exit limit when predicates are not allowed!"); 8321 8322 // 1. For each exit that can be computed, add an entry to ExitCounts. 8323 // CouldComputeBECount is true only if all exits can be computed. 8324 if (EL.ExactNotTaken == getCouldNotCompute()) 8325 // We couldn't compute an exact value for this exit, so 8326 // we won't be able to compute an exact value for the loop. 8327 CouldComputeBECount = false; 8328 else 8329 ExitCounts.emplace_back(ExitBB, EL); 8330 8331 // 2. Derive the loop's MaxBECount from each exit's max number of 8332 // non-exiting iterations. Partition the loop exits into two kinds: 8333 // LoopMustExits and LoopMayExits. 8334 // 8335 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it 8336 // is a LoopMayExit. If any computable LoopMustExit is found, then 8337 // MaxBECount is the minimum EL.MaxNotTaken of computable 8338 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum 8339 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any 8340 // computable EL.MaxNotTaken.
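// For illustration (hypothetical exits): if one computable exit dominates the latch with MaxNotTaken = 10 while another exit that does not dominate the latch has MaxNotTaken = 100, the code below picks the must-exit bound, so MaxBECount is 10.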
8341 if (EL.MaxNotTaken != getCouldNotCompute() && Latch && 8342 DT.dominates(ExitBB, Latch)) { 8343 if (!MustExitMaxBECount) { 8344 MustExitMaxBECount = EL.MaxNotTaken; 8345 MustExitMaxOrZero = EL.MaxOrZero; 8346 } else { 8347 MustExitMaxBECount = 8348 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken); 8349 } 8350 } else if (MayExitMaxBECount != getCouldNotCompute()) { 8351 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute()) 8352 MayExitMaxBECount = EL.MaxNotTaken; 8353 else { 8354 MayExitMaxBECount = 8355 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken); 8356 } 8357 } 8358 } 8359 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount : 8360 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute()); 8361 // The loop backedge will be taken the maximum or zero times if there's 8362 // a single exit that must be taken the maximum or zero times. 8363 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1); 8364 8365 // Remember which SCEVs are used in exit limits for invalidation purposes. 8366 // We only care about non-constant SCEVs here, so we can ignore EL.MaxNotTaken 8367 // and MaxBECount, which must be SCEVConstant. 8368 for (const auto &Pair : ExitCounts) 8369 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken)) 8370 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates}); 8371 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount, 8372 MaxBECount, MaxOrZero); 8373 } 8374 8375 ScalarEvolution::ExitLimit 8376 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock, 8377 bool AllowPredicates) { 8378 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?"); 8379 // If our exiting block does not dominate the latch, then its connection with 8380 // the loop's exit limit may be far from trivial. 8381 const BasicBlock *Latch = L->getLoopLatch(); 8382 if (!Latch || !DT.dominates(ExitingBlock, Latch)) 8383 return getCouldNotCompute(); 8384 8385 bool IsOnlyExit = (L->getExitingBlock() != nullptr); 8386 Instruction *Term = ExitingBlock->getTerminator(); 8387 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { 8388 assert(BI->isConditional() && "If unconditional, it can't be in loop!"); 8389 bool ExitIfTrue = !L->contains(BI->getSuccessor(0)); 8390 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) && 8391 "It should have one successor in loop and one exit block!"); 8392 // Proceed to the next level to examine the exit condition expression. 8393 return computeExitLimitFromCond( 8394 L, BI->getCondition(), ExitIfTrue, 8395 /*ControlsExit=*/IsOnlyExit, AllowPredicates); 8396 } 8397 8398 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) { 8399 // For a switch, make sure that there is a single exit from the loop. 8400 BasicBlock *Exit = nullptr; 8401 for (auto *SBB : successors(ExitingBlock)) 8402 if (!L->contains(SBB)) { 8403 if (Exit) // Multiple exit successors.
8404 return getCouldNotCompute(); 8405 Exit = SBB; 8406 } 8407 assert(Exit && "Exiting block must have at least one exit"); 8408 return computeExitLimitFromSingleExitSwitch(L, SI, Exit, 8409 /*ControlsExit=*/IsOnlyExit); 8410 } 8411 8412 return getCouldNotCompute(); 8413 } 8414 8415 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond( 8416 const Loop *L, Value *ExitCond, bool ExitIfTrue, 8417 bool ControlsExit, bool AllowPredicates) { 8418 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates); 8419 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue, 8420 ControlsExit, AllowPredicates); 8421 } 8422 8423 Optional<ScalarEvolution::ExitLimit> 8424 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond, 8425 bool ExitIfTrue, bool ControlsExit, 8426 bool AllowPredicates) { 8427 (void)this->L; 8428 (void)this->ExitIfTrue; 8429 (void)this->AllowPredicates; 8430 8431 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8432 this->AllowPredicates == AllowPredicates && 8433 "Variance in assumed invariant key components!"); 8434 auto Itr = TripCountMap.find({ExitCond, ControlsExit}); 8435 if (Itr == TripCountMap.end()) 8436 return None; 8437 return Itr->second; 8438 } 8439 8440 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond, 8441 bool ExitIfTrue, 8442 bool ControlsExit, 8443 bool AllowPredicates, 8444 const ExitLimit &EL) { 8445 assert(this->L == L && this->ExitIfTrue == ExitIfTrue && 8446 this->AllowPredicates == AllowPredicates && 8447 "Variance in assumed invariant key components!"); 8448 8449 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL}); 8450 assert(InsertResult.second && "Expected successful insertion!"); 8451 (void)InsertResult; 8452 (void)ExitIfTrue; 8453 } 8454 8455 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached( 8456 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8457 bool ControlsExit, bool AllowPredicates) { 8458 8459 if (auto MaybeEL = 8460 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8461 return *MaybeEL; 8462 8463 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue, 8464 ControlsExit, AllowPredicates); 8465 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL); 8466 return EL; 8467 } 8468 8469 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl( 8470 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8471 bool ControlsExit, bool AllowPredicates) { 8472 // Handle BinOp conditions (And, Or). 8473 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp( 8474 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates)) 8475 return *LimitFromBinOp; 8476 8477 // With an icmp, it may be feasible to compute an exact backedge-taken count. 8478 // Proceed to the next level to examine the icmp. 8479 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { 8480 ExitLimit EL = 8481 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit); 8482 if (EL.hasFullInfo() || !AllowPredicates) 8483 return EL; 8484 8485 // Try again, but use SCEV predicates this time. 8486 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit, 8487 /*AllowPredicates=*/true); 8488 } 8489 8490 // Check for a constant condition. 
These are normally stripped out by 8491 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 8492 // preserve the CFG and is temporarily leaving constant conditions 8493 // in place. 8494 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 8495 if (ExitIfTrue == !CI->getZExtValue()) 8496 // The backedge is always taken. 8497 return getCouldNotCompute(); 8498 else 8499 // The backedge is never taken. 8500 return getZero(CI->getType()); 8501 } 8502 8503 // If we're exiting based on the overflow flag of an x.with.overflow intrinsic 8504 // with a constant step, we can form an equivalent icmp predicate and figure 8505 // out how many iterations will be taken before we exit. 8506 const WithOverflowInst *WO; 8507 const APInt *C; 8508 if (match(ExitCond, m_ExtractValue<1>(m_WithOverflowInst(WO))) && 8509 match(WO->getRHS(), m_APInt(C))) { 8510 ConstantRange NWR = 8511 ConstantRange::makeExactNoWrapRegion(WO->getBinaryOp(), *C, 8512 WO->getNoWrapKind()); 8513 CmpInst::Predicate Pred; 8514 APInt NewRHSC, Offset; 8515 NWR.getEquivalentICmp(Pred, NewRHSC, Offset); 8516 if (!ExitIfTrue) 8517 Pred = ICmpInst::getInversePredicate(Pred); 8518 auto *LHS = getSCEV(WO->getLHS()); 8519 if (Offset != 0) 8520 LHS = getAddExpr(LHS, getConstant(Offset)); 8521 auto EL = computeExitLimitFromICmp(L, Pred, LHS, getConstant(NewRHSC), 8522 ControlsExit, AllowPredicates); 8523 if (EL.hasAnyInfo()) return EL; 8524 } 8525 8526 // If it's not an integer or pointer comparison then compute it the hard way. 8527 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 8528 } 8529 8530 Optional<ScalarEvolution::ExitLimit> 8531 ScalarEvolution::computeExitLimitFromCondFromBinOp( 8532 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue, 8533 bool ControlsExit, bool AllowPredicates) { 8534 // Check if the controlling expression for this loop is an And or Or. 8535 Value *Op0, *Op1; 8536 bool IsAnd = false; 8537 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) 8538 IsAnd = true; 8539 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) 8540 IsAnd = false; 8541 else 8542 return None; 8543 8544 // EitherMayExit is true in these two cases: 8545 // br (and Op0 Op1), loop, exit 8546 // br (or Op0 Op1), exit, loop 8547 bool EitherMayExit = IsAnd ^ ExitIfTrue; 8548 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue, 8549 ControlsExit && !EitherMayExit, 8550 AllowPredicates); 8551 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue, 8552 ControlsExit && !EitherMayExit, 8553 AllowPredicates); 8554 8555 // Be robust against unsimplified IR for the form "op i1 X, NeutralElement". 8556 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd); 8557 if (isa<ConstantInt>(Op1)) 8558 return Op1 == NeutralElement ? EL0 : EL1; 8559 if (isa<ConstantInt>(Op0)) 8560 return Op0 == NeutralElement ? EL1 : EL0; 8561 8562 const SCEV *BECount = getCouldNotCompute(); 8563 const SCEV *MaxBECount = getCouldNotCompute(); 8564 if (EitherMayExit) { 8565 // Both conditions must be true for the loop to continue executing. 8566 // Choose the less conservative count.
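// For illustration (hypothetical counts): for "br i1 (and %c0, %c1), label %loop, label %exit", the loop exits as soon as either condition becomes false, so with exact not-taken counts 5 and 7 for %c0 and %c1, the combined BECount computed below is umin(5, 7) = 5.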
8567 if (EL0.ExactNotTaken != getCouldNotCompute() && 8568 EL1.ExactNotTaken != getCouldNotCompute()) { 8569 BECount = getUMinFromMismatchedTypes( 8570 EL0.ExactNotTaken, EL1.ExactNotTaken, 8571 /*Sequential=*/!isa<BinaryOperator>(ExitCond)); 8572 } 8573 if (EL0.MaxNotTaken == getCouldNotCompute()) 8574 MaxBECount = EL1.MaxNotTaken; 8575 else if (EL1.MaxNotTaken == getCouldNotCompute()) 8576 MaxBECount = EL0.MaxNotTaken; 8577 else 8578 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken); 8579 } else { 8580 // Both conditions must be true at the same time for the loop to exit. 8581 // For now, be conservative. 8582 if (EL0.ExactNotTaken == EL1.ExactNotTaken) 8583 BECount = EL0.ExactNotTaken; 8584 } 8585 8586 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able 8587 // to be more aggressive when computing BECount than when computing 8588 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and 8589 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken 8590 // to not. 8591 if (isa<SCEVCouldNotCompute>(MaxBECount) && 8592 !isa<SCEVCouldNotCompute>(BECount)) 8593 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 8594 8595 return ExitLimit(BECount, MaxBECount, false, 8596 { &EL0.Predicates, &EL1.Predicates }); 8597 } 8598 8599 ScalarEvolution::ExitLimit 8600 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8601 ICmpInst *ExitCond, 8602 bool ExitIfTrue, 8603 bool ControlsExit, 8604 bool AllowPredicates) { 8605 // If the condition was exit on true, convert the condition to exit on false. 8606 ICmpInst::Predicate Pred; 8607 if (!ExitIfTrue) 8608 Pred = ExitCond->getPredicate(); 8609 else 8610 Pred = ExitCond->getInversePredicate(); 8611 const ICmpInst::Predicate OriginalPred = Pred; 8612 8613 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 8614 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 8615 8616 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsExit, 8617 AllowPredicates); 8618 if (EL.hasAnyInfo()) return EL; 8619 8620 auto *ExhaustiveCount = 8621 computeExitCountExhaustively(L, ExitCond, ExitIfTrue); 8622 8623 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount)) 8624 return ExhaustiveCount; 8625 8626 return computeShiftCompareExitLimit(ExitCond->getOperand(0), 8627 ExitCond->getOperand(1), L, OriginalPred); 8628 } 8629 ScalarEvolution::ExitLimit 8630 ScalarEvolution::computeExitLimitFromICmp(const Loop *L, 8631 ICmpInst::Predicate Pred, 8632 const SCEV *LHS, const SCEV *RHS, 8633 bool ControlsExit, 8634 bool AllowPredicates) { 8635 8636 // Try to evaluate any dependencies out of the loop. 8637 LHS = getSCEVAtScope(LHS, L); 8638 RHS = getSCEVAtScope(RHS, L); 8639 8640 // At this point, we would like to compute how many iterations of the 8641 // loop the predicate will return true for these inputs. 8642 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) { 8643 // If there is a loop-invariant operand, force it into the RHS. 8644 std::swap(LHS, RHS); 8645 Pred = ICmpInst::getSwappedPredicate(Pred); 8646 } 8647 8648 bool ControllingFiniteLoop = 8649 ControlsExit && loopHasNoAbnormalExits(L) && loopIsFiniteByAssumption(L); 8650 // Simplify the operands before analyzing them. 8651 (void)SimplifyICmpOperands(Pred, LHS, RHS, /*Depth=*/0, 8652 (EnableFiniteLoopControl ? ControllingFiniteLoop 8653 : false)); 8654 8655 // If we have a comparison of a chrec against a constant, try to use value 8656 // ranges to answer this query.
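// For illustration (a hypothetical chrec): for {0,+,1}<%loop> with Pred ult and RHS 10, makeExactICmpRegion below yields the range [0, 10), and getNumIterationsInRange reports that the addrec stays inside that range for 10 iterations.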
8657 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 8658 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 8659 if (AddRec->getLoop() == L) { 8660 // Form the constant range. 8661 ConstantRange CompRange = 8662 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt()); 8663 8664 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 8665 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 8666 } 8667 8668 // If this loop must exit based on this condition (or execute undefined 8669 // behaviour), and we can prove the test sequence produced must repeat 8670 // the same values on self-wrap of the IV, then we can infer that IV 8671 // doesn't self wrap because if it did, we'd have an infinite (undefined) 8672 // loop. 8673 if (ControllingFiniteLoop && isLoopInvariant(RHS, L)) { 8674 // TODO: We can peel off any functions which are invertible *in L*. Loop 8675 // invariant terms are effectively constants for our purposes here. 8676 auto *InnerLHS = LHS; 8677 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) 8678 InnerLHS = ZExt->getOperand(); 8679 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) { 8680 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 8681 if (!AR->hasNoSelfWrap() && AR->getLoop() == L && AR->isAffine() && 8682 StrideC && StrideC->getAPInt().isPowerOf2()) { 8683 auto Flags = AR->getNoWrapFlags(); 8684 Flags = setFlags(Flags, SCEV::FlagNW); 8685 SmallVector<const SCEV*> Operands{AR->operands()}; 8686 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); 8687 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 8688 } 8689 } 8690 } 8691 8692 switch (Pred) { 8693 case ICmpInst::ICMP_NE: { // while (X != Y) 8694 // Convert to: while (X-Y != 0) 8695 if (LHS->getType()->isPointerTy()) { 8696 LHS = getLosslessPtrToIntExpr(LHS); 8697 if (isa<SCEVCouldNotCompute>(LHS)) 8698 return LHS; 8699 } 8700 if (RHS->getType()->isPointerTy()) { 8701 RHS = getLosslessPtrToIntExpr(RHS); 8702 if (isa<SCEVCouldNotCompute>(RHS)) 8703 return RHS; 8704 } 8705 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit, 8706 AllowPredicates); 8707 if (EL.hasAnyInfo()) return EL; 8708 break; 8709 } 8710 case ICmpInst::ICMP_EQ: { // while (X == Y) 8711 // Convert to: while (X-Y == 0) 8712 if (LHS->getType()->isPointerTy()) { 8713 LHS = getLosslessPtrToIntExpr(LHS); 8714 if (isa<SCEVCouldNotCompute>(LHS)) 8715 return LHS; 8716 } 8717 if (RHS->getType()->isPointerTy()) { 8718 RHS = getLosslessPtrToIntExpr(RHS); 8719 if (isa<SCEVCouldNotCompute>(RHS)) 8720 return RHS; 8721 } 8722 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L); 8723 if (EL.hasAnyInfo()) return EL; 8724 break; 8725 } 8726 case ICmpInst::ICMP_SLT: 8727 case ICmpInst::ICMP_ULT: { // while (X < Y) 8728 bool IsSigned = Pred == ICmpInst::ICMP_SLT; 8729 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit, 8730 AllowPredicates); 8731 if (EL.hasAnyInfo()) return EL; 8732 break; 8733 } 8734 case ICmpInst::ICMP_SGT: 8735 case ICmpInst::ICMP_UGT: { // while (X > Y) 8736 bool IsSigned = Pred == ICmpInst::ICMP_SGT; 8737 ExitLimit EL = 8738 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit, 8739 AllowPredicates); 8740 if (EL.hasAnyInfo()) return EL; 8741 break; 8742 } 8743 default: 8744 break; 8745 } 8746 8747 return getCouldNotCompute(); 8748 } 8749 8750 ScalarEvolution::ExitLimit 8751 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L, 8752 SwitchInst *Switch, 8753 BasicBlock *ExitingBlock, 8754 bool 
ControlsExit) { 8755 assert(!L->contains(ExitingBlock) && "Not an exiting block!"); 8756 8757 // Give up if the exit is the default dest of a switch. 8758 if (Switch->getDefaultDest() == ExitingBlock) 8759 return getCouldNotCompute(); 8760 8761 assert(L->contains(Switch->getDefaultDest()) && 8762 "Default case must not exit the loop!"); 8763 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L); 8764 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock)); 8765 8766 // while (X != Y) --> while (X-Y != 0) 8767 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit); 8768 if (EL.hasAnyInfo()) 8769 return EL; 8770 8771 return getCouldNotCompute(); 8772 } 8773 8774 static ConstantInt * 8775 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 8776 ScalarEvolution &SE) { 8777 const SCEV *InVal = SE.getConstant(C); 8778 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 8779 assert(isa<SCEVConstant>(Val) && 8780 "Evaluation of SCEV at constant didn't fold correctly?"); 8781 return cast<SCEVConstant>(Val)->getValue(); 8782 } 8783 8784 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( 8785 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { 8786 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); 8787 if (!RHS) 8788 return getCouldNotCompute(); 8789 8790 const BasicBlock *Latch = L->getLoopLatch(); 8791 if (!Latch) 8792 return getCouldNotCompute(); 8793 8794 const BasicBlock *Predecessor = L->getLoopPredecessor(); 8795 if (!Predecessor) 8796 return getCouldNotCompute(); 8797 8798 // Return true if V is of the form "LHS `shift_op` <positive constant>". 8799 // Return LHS in OutLHS and shift_op in OutOpCode. 8800 auto MatchPositiveShift = 8801 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) { 8802 8803 using namespace PatternMatch; 8804 8805 ConstantInt *ShiftAmt; 8806 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 8807 OutOpCode = Instruction::LShr; 8808 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 8809 OutOpCode = Instruction::AShr; 8810 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt)))) 8811 OutOpCode = Instruction::Shl; 8812 else 8813 return false; 8814 8815 return ShiftAmt->getValue().isStrictlyPositive(); 8816 }; 8817 8818 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in 8819 // 8820 // loop: 8821 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ] 8822 // %iv.shifted = lshr i32 %iv, <positive constant> 8823 // 8824 // Return true on a successful match. Return the corresponding PHI node (%iv 8825 // above) in PNOut and the opcode of the shift operation in OpCodeOut. 8826 auto MatchShiftRecurrence = 8827 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) { 8828 Optional<Instruction::BinaryOps> PostShiftOpCode; 8829 8830 { 8831 Instruction::BinaryOps OpC; 8832 Value *V; 8833 8834 // If we encounter a shift instruction, "peel off" the shift operation, 8835 // and remember that we did so. Later when we inspect %iv's backedge 8836 // value, we will make sure that the backedge value uses the same 8837 // operation. 8838 // 8839 // Note: the peeled shift operation does not have to be the same 8840 // instruction as the one feeding into the PHI's backedge value. We only 8841 // really care about it being the same *kind* of shift instruction -- 8842 // that's all that is required for our later inferences to hold.
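// For illustration (hypothetical IR): given the recurrence above, matching %iv finds the PHI directly, while matching a value such as "%v = lshr i32 %iv.shifted, 2" first peels off that extra lshr and then requires the backedge shift to be of the same kind (lshr).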
8843 if (MatchPositiveShift(LHS, V, OpC)) { 8844 PostShiftOpCode = OpC; 8845 LHS = V; 8846 } 8847 } 8848 8849 PNOut = dyn_cast<PHINode>(LHS); 8850 if (!PNOut || PNOut->getParent() != L->getHeader()) 8851 return false; 8852 8853 Value *BEValue = PNOut->getIncomingValueForBlock(Latch); 8854 Value *OpLHS; 8855 8856 return 8857 // The backedge value for the PHI node must be a shift by a positive 8858 // amount 8859 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) && 8860 8861 // of the PHI node itself 8862 OpLHS == PNOut && 8863 8864 // and the kind of shift should match the kind of shift we peeled 8865 // off, if any. 8866 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut); 8867 }; 8868 8869 PHINode *PN; 8870 Instruction::BinaryOps OpCode; 8871 if (!MatchShiftRecurrence(LHS, PN, OpCode)) 8872 return getCouldNotCompute(); 8873 8874 const DataLayout &DL = getDataLayout(); 8875 8876 // The key rationale for this optimization is that for some kinds of shift 8877 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1 8878 // within a finite number of iterations. If the condition guarding the 8879 // backedge (in the sense that the backedge is taken if the condition is true) 8880 // is false for the value the shift recurrence stabilizes to, then we know 8881 // that the backedge is taken only a finite number of times. 8882 8883 ConstantInt *StableValue = nullptr; 8884 switch (OpCode) { 8885 default: 8886 llvm_unreachable("Impossible case!"); 8887 8888 case Instruction::AShr: { 8889 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most 8890 // bitwidth(K) iterations. 8891 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor); 8892 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC, 8893 Predecessor->getTerminator(), &DT); 8894 auto *Ty = cast<IntegerType>(RHS->getType()); 8895 if (Known.isNonNegative()) 8896 StableValue = ConstantInt::get(Ty, 0); 8897 else if (Known.isNegative()) 8898 StableValue = ConstantInt::get(Ty, -1, true); 8899 else 8900 return getCouldNotCompute(); 8901 8902 break; 8903 } 8904 case Instruction::LShr: 8905 case Instruction::Shl: 8906 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>} 8907 // stabilize to 0 in at most bitwidth(K) iterations. 8908 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0); 8909 break; 8910 } 8911 8912 auto *Result = 8913 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI); 8914 assert(Result->getType()->isIntegerTy(1) && 8915 "Otherwise cannot be an operand to a branch instruction"); 8916 8917 if (Result->isZeroValue()) { 8918 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 8919 const SCEV *UpperBound = 8920 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth); 8921 return ExitLimit(getCouldNotCompute(), UpperBound, false); 8922 } 8923 8924 return getCouldNotCompute(); 8925 } 8926 8927 /// Return true if we can constant fold an instruction of the specified type, 8928 /// assuming that all operands were constants.
8929 static bool CanConstantFold(const Instruction *I) { 8930 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 8931 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 8932 isa<LoadInst>(I) || isa<ExtractValueInst>(I)) 8933 return true; 8934 8935 if (const CallInst *CI = dyn_cast<CallInst>(I)) 8936 if (const Function *F = CI->getCalledFunction()) 8937 return canConstantFoldCallTo(CI, F); 8938 return false; 8939 } 8940 8941 /// Determine whether this instruction can constant evolve within this loop 8942 /// assuming its operands can all constant evolve. 8943 static bool canConstantEvolve(Instruction *I, const Loop *L) { 8944 // An instruction outside of the loop can't be derived from a loop PHI. 8945 if (!L->contains(I)) return false; 8946 8947 if (isa<PHINode>(I)) { 8948 // We don't currently keep track of the control flow needed to evaluate 8949 // PHIs, so we cannot handle PHIs inside of loops. 8950 return L->getHeader() == I->getParent(); 8951 } 8952 8953 // If we won't be able to constant fold this expression even if the operands 8954 // are constants, bail early. 8955 return CanConstantFold(I); 8956 } 8957 8958 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by 8959 /// recursing through each instruction operand until reaching a loop header phi. 8960 static PHINode * 8961 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, 8962 DenseMap<Instruction *, PHINode *> &PHIMap, 8963 unsigned Depth) { 8964 if (Depth > MaxConstantEvolvingDepth) 8965 return nullptr; 8966 8967 // Otherwise, we can evaluate this instruction if all of its operands are 8968 // constant or derived from a PHI node themselves. 8969 PHINode *PHI = nullptr; 8970 for (Value *Op : UseInst->operands()) { 8971 if (isa<Constant>(Op)) continue; 8972 8973 Instruction *OpInst = dyn_cast<Instruction>(Op); 8974 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; 8975 8976 PHINode *P = dyn_cast<PHINode>(OpInst); 8977 if (!P) 8978 // If this operand is already visited, reuse the prior result. 8979 // We may have P != PHI if this is the deepest point at which the 8980 // inconsistent paths meet. 8981 P = PHIMap.lookup(OpInst); 8982 if (!P) { 8983 // Recurse and memoize the results, whether a phi is found or not. 8984 // This recursive call invalidates pointers into PHIMap. 8985 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1); 8986 PHIMap[OpInst] = P; 8987 } 8988 if (!P) 8989 return nullptr; // Not evolving from a PHI. 8990 if (PHI && PHI != P) 8991 return nullptr; // Evolving from multiple different PHIs. 8992 PHI = P; 8993 } 8994 // This is an expression evolving from a constant PHI! 8995 return PHI; 8996 } 8997 8998 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 8999 /// in the loop that V is derived from. We allow arbitrary operations along the 9000 /// way, but the operands of an operation must either be constants or a value 9001 /// derived from a constant PHI. If this expression does not fit with these 9002 /// constraints, return null. 9003 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 9004 Instruction *I = dyn_cast<Instruction>(V); 9005 if (!I || !canConstantEvolve(I, L)) return nullptr; 9006 9007 if (PHINode *PN = dyn_cast<PHINode>(I)) 9008 return PN; 9009 9010 // Record non-constant instructions contained by the loop.
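// For illustration (hypothetical IR): for "%i = phi i32 [ 0, %entry ], [ %i.next, %loop ]" and "%i.next = add i32 %i, 1", the recursive search starting at %i.next reaches the single header phi %i, which is the constant-evolving PHI returned below.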
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant*> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}


// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to
                                                         // evaluate it.
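  // Illustrative (hypothetical) example: for a header PHI
  //   %p = phi i32 [ 1, %preheader ], [ %p.next, %latch ]
  //   %p.next = mul i32 %p, 3
  // and a backedge-taken count of 4, the evaluation loop below produces
  // 1 -> 3 -> 9 -> 27 -> 81 and returns i32 81.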

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes. However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}

const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;
       ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this
    // before calling EvaluateExpression on them because that may invalidate
    // iterators into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue; // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      if (!isa<SCEVConstant>(C))
        ValuesAtScopesUsers[C].push_back({L, V});
      break;
    }
  return C;
}

/// This builds up a Constant using the ConstantExpr interface. That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
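/// As an illustrative (hypothetical) example: given a global @g of type i8*,
/// the SCEV (8 + @g) is built as the ConstantExpr
///   getelementptr(i8, i8* @g, i64 8)
/// since the scAddExpr handling below folds integer addends on a pointer base
/// into a byte-offset GEP.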
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
  case scCouldNotCompute:
  case scAddRecExpr:
    return nullptr;
  case scConstant:
    return cast<SCEVConstant>(V)->getValue();
  case scUnknown:
    return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
  case scSignExtend: {
    const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
      return ConstantExpr::getSExt(CastOp, SS->getType());
    return nullptr;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
      return ConstantExpr::getZExt(CastOp, SZ->getType());
    return nullptr;
  }
  case scPtrToInt: {
    const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
      return ConstantExpr::getPtrToInt(CastOp, P2I->getType());

    return nullptr;
  }
  case scTruncate: {
    const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
      return ConstantExpr::getTrunc(CastOp, ST->getType());
    return nullptr;
  }
  case scAddExpr: {
    const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
      if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
        unsigned AS = PTy->getAddressSpace();
        Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
        C = ConstantExpr::getBitCast(C, DestPtrTy);
      }
      for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
        if (!C2)
          return nullptr;

        // First pointer!
        if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
          unsigned AS = C2->getType()->getPointerAddressSpace();
          std::swap(C, C2);
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          // The offsets have been converted to bytes. We can add bytes to an
          // i8* by GEP with the byte count in the first index.
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }

        // Don't bother trying to sum two pointers. We probably can't
        // statically compute a load that results from it anyway.
        if (C2->getType()->isPointerTy())
          return nullptr;

        if (C->getType()->isPointerTy()) {
          C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
                                             C, C2);
        } else {
          C = ConstantExpr::getAdd(C, C2);
        }
      }
      return C;
    }
    return nullptr;
  }
  case scMulExpr: {
    const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
      // Don't bother with pointers at all.
      if (C->getType()->isPointerTy())
        return nullptr;
      for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
        Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
        if (!C2 || C2->getType()->isPointerTy())
          return nullptr;
        C = ConstantExpr::getMul(C, C2);
      }
      return C;
    }
    return nullptr;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
    if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
      if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
        if (LHS->getType() == RHS->getType())
          return ConstantExpr::getUDiv(LHS, RHS);
    return nullptr;
  }
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr:
  case scSequentialUMinExpr:
    return nullptr; // TODO: smax, umax, smin, umin, umin_seq.
  }
  llvm_unreachable("Unknown SCEV kind!");
}

const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *CurrLoop = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (CurrLoop && CurrLoop->getParentLoop() == L &&
            PN->getParent() == CurrLoop->getHeader()) {
          // Okay, there is no closed form solution for the PHI node. Check
          // to see if the loop that contains it has a known backedge-taken
          // count. If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {

            unsigned InLoopPred =
                CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
            if (CurrLoop->isLoopInvariant(BackedgeVal))
              return getSCEV(BackedgeVal);
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes. If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV = getConstantEvolutionLoopExitValue(
                PN, BTCC->getAPInt(), CurrLoop);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use new value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize this using
          // LI.replacementPreservesLCSSAForm; for the simplest case, just
          // support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV. Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result. This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with SCEV techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], DL,
                                                &TLI);
          else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
            if (!Load->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
                                               DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (isa<SCEVCommutativeExpr>(V) || isa<SCEVSequentialMinMaxExpr>(V)) {
    const auto *Comm = cast<SCEVNAryExpr>(V);
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable. Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin() + i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        if (isa<SCEVSequentialMinMaxExpr>(Comm))
          return getSequentialMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative / sequential min/max SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable. Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin() + i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates. Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getCastExpr(Cast->getSCEVType(), Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}

const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}

/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for
  // B is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2); // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //      I * (B / D) mod (N / D)
  //    To simplify the computation, we factor out the divide by D:
  //      (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}

/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
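///
/// As an illustrative example: for the addrec {1,+,2,+,4} (L = 1, M = 2,
/// N = 4), the accumulated value after n iterations is 1 + 2n + 2n(n-1),
/// and the generated equation is 4n^2 + 0n + 2 = 0 with the common
/// multiplier 2, i.e. the tuple { 4, 0, 2, 2, BitWidth }.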
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isZero() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}

/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sext(W);
    APInt YW = Y->sext(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}

/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
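///
/// For example, if the addrec coefficients are i8 (BitWidth = 8) and the
/// solver returns the 9-bit value 20, the result is truncated back to i8;
/// a 9-bit value of 300 does not fit into 8 bits and is returned as i9
/// unchanged.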
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}

/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist, in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X =
      APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth + 1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}

/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary =
      [&](APInt Bound) -> std::pair<Optional<APInt>, bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&](const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X - 1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sext(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sext(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with an "x != y" exit test. The exit
  // condition is now expressed as a single expression, V = x-y. So the exit
  // test is effectively V != 0. We know and take advantage of the fact that
  // this expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index. When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to
  // wrap to 0, it must be counting down to equal 0. Consequently,
  // N = Start / -Step. We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
    MaxBECount = APIntOps::umin(MaxBECount, getUnsignedRangeMax(Distance));

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }
    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.
  // In this case, the step may not divide the distance, but we don't care
  // because if the condition is "missed" the loop will have undefined behavior
  // due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max = getCouldNotCompute();
    if (Exact != getCouldNotCompute()) {
      APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
      Max = getConstant(APIntOps::umin(MaxInt, getUnsignedRangeMax(Exact)));
    }
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);

  const SCEV *M = E;
  if (E != getCouldNotCompute()) {
    APInt MaxWithGuards = getUnsignedRangeMax(applyLoopGuards(E, L));
    M = getConstant(APIntOps::umin(MaxWithGuards, getUnsignedRangeMax(E)));
  }
  return ExitLimit(E, M, false, Predicates);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed. We don't
  // handle them yet except for the trivial case. This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already. If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

std::pair<const BasicBlock *, const BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
    const {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (const BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (const Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}

/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.
    // For instance, two distinct alloca instructions allocating the same
    // type are identical and do not read memory, but they compute distinct
    // values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth,
                                           bool ControllingFiniteLoop) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;


        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands. This can be done for
  // one of two reasons:
  // 1) The range of the RHS does not include the (signed/unsigned) boundaries
  // 2) The loop is finite, with this comparison controlling the exit. Since
  //    the loop is finite, the bound cannot include the corresponding boundary
  //    (otherwise it would loop forever).
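  //
  // For example (illustrative): "%x s<= %n" becomes "%x s< (%n + 1)" when the
  // signed-max value is known not to be in %n's range (reason 1), and
  // "%i u>= %n" becomes "%i u> (%n - 1)" when %n is known non-zero, or,
  // likewise, when the comparison controls a finite loop's exit (reason 2).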
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (ControllingFiniteLoop || !getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (ControllingFiniteLoop || !getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (ControllingFiniteLoop || !getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (ControllingFiniteLoop || !getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth + 1,
                                ControllingFiniteLoop);

  return Changed;
}

bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return getUnsignedRangeMin(S) != 0;
}

std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}

bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
                          return DT.properlyDominates(L1->getHeader(),
                                                      L2->getHeader());
                        });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that the init SCEV contains an invariant load that does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check for that here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  // It seems the backedge guard check is faster than the entry one, so in
  // some cases it can speed up the whole estimation by short-circuiting.
  return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second) &&
         isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
}

bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}

Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
                                                  const SCEV *LHS,
                                                  const SCEV *RHS) {
  if (isKnownPredicate(Pred, LHS, RHS))
    return true;
  else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
    return false;
  return None;
}

bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS,
                                         const Instruction *CtxI) {
  // TODO: Analyze guards and assumes from Context's block.
10438   return isKnownPredicate(Pred, LHS, RHS) ||
10439          isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS);
10440 }
10441
10442 Optional<bool> ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred,
10443                                                     const SCEV *LHS,
10444                                                     const SCEV *RHS,
10445                                                     const Instruction *CtxI) {
10446   Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
10447   if (KnownWithoutContext)
10448     return KnownWithoutContext;
10449
10450   if (isBasicBlockEntryGuardedByCond(CtxI->getParent(), Pred, LHS, RHS))
10451     return true;
10452   else if (isBasicBlockEntryGuardedByCond(CtxI->getParent(),
10453                                           ICmpInst::getInversePredicate(Pred),
10454                                           LHS, RHS))
10455     return false;
10456   return None;
10457 }
10458
10459 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
10460                                               const SCEVAddRecExpr *LHS,
10461                                               const SCEV *RHS) {
10462   const Loop *L = LHS->getLoop();
10463   return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
10464          isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
10465 }
10466
10467 Optional<ScalarEvolution::MonotonicPredicateType>
10468 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
10469                                            ICmpInst::Predicate Pred) {
10470   auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
10471
10472 #ifndef NDEBUG
10473   // Verify an invariant: swapping the predicate should turn a monotonically
10474   // increasing change into a monotonically decreasing one, and vice versa.
10475   if (Result) {
10476     auto ResultSwapped =
10477         getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
10478
10479     assert(ResultSwapped.hasValue() && "should be able to analyze both!");
10480     assert(ResultSwapped.getValue() != Result.getValue() &&
10481            "monotonicity should flip as we flip the predicate");
10482   }
10483 #endif
10484
10485   return Result;
10486 }
10487
10488 Optional<ScalarEvolution::MonotonicPredicateType>
10489 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
10490                                                ICmpInst::Predicate Pred) {
10491   // A zero step value for LHS means the induction variable is essentially a
10492   // loop invariant value. We don't really depend on the predicate actually
10493   // flipping from false to true (for increasing predicates, and the other way
10494   // around for decreasing predicates); all we care about is that *if* the
10495   // predicate changes then it only changes from false to true.
10496   //
10497   // A zero step value in itself is not very useful, but there may be places
10498   // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
10499   // as general as possible.
10500
10501   // Only handle LE/LT/GE/GT predicates.
10502   if (!ICmpInst::isRelational(Pred))
10503     return None;
10504
10505   bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
10506   assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
10507          "Should be greater or less!");
10508
10509   // Check that AR does not wrap.
10510   if (ICmpInst::isUnsigned(Pred)) {
10511     if (!LHS->hasNoUnsignedWrap())
10512       return None;
10513     return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10514   } else {
10515     assert(ICmpInst::isSigned(Pred) &&
10516            "Relational predicate is either signed or unsigned!");
10517     if (!LHS->hasNoSignedWrap())
10518       return None;
10519
10520     const SCEV *Step = LHS->getStepRecurrence(*this);
10521
10522     if (isKnownNonNegative(Step))
10523       return IsGreater ?
MonotonicallyIncreasing : MonotonicallyDecreasing; 10524 10525 if (isKnownNonPositive(Step)) 10526 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing; 10527 10528 return None; 10529 } 10530 } 10531 10532 Optional<ScalarEvolution::LoopInvariantPredicate> 10533 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred, 10534 const SCEV *LHS, const SCEV *RHS, 10535 const Loop *L) { 10536 10537 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10538 if (!isLoopInvariant(RHS, L)) { 10539 if (!isLoopInvariant(LHS, L)) 10540 return None; 10541 10542 std::swap(LHS, RHS); 10543 Pred = ICmpInst::getSwappedPredicate(Pred); 10544 } 10545 10546 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); 10547 if (!ArLHS || ArLHS->getLoop() != L) 10548 return None; 10549 10550 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred); 10551 if (!MonotonicType) 10552 return None; 10553 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to 10554 // true as the loop iterates, and the backedge is control dependent on 10555 // "ArLHS `Pred` RHS" == true then we can reason as follows: 10556 // 10557 // * if the predicate was false in the first iteration then the predicate 10558 // is never evaluated again, since the loop exits without taking the 10559 // backedge. 10560 // * if the predicate was true in the first iteration then it will 10561 // continue to be true for all future iterations since it is 10562 // monotonically increasing. 10563 // 10564 // For both the above possibilities, we can replace the loop varying 10565 // predicate with its value on the first iteration of the loop (which is 10566 // loop invariant). 10567 // 10568 // A similar reasoning applies for a monotonically decreasing predicate, by 10569 // replacing true with false and false with true in the above two bullets. 10570 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing; 10571 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred); 10572 10573 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS)) 10574 return None; 10575 10576 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS); 10577 } 10578 10579 Optional<ScalarEvolution::LoopInvariantPredicate> 10580 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations( 10581 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, 10582 const Instruction *CtxI, const SCEV *MaxIter) { 10583 // Try to prove the following set of facts: 10584 // - The predicate is monotonic in the iteration space. 10585 // - If the check does not fail on the 1st iteration: 10586 // - No overflow will happen during first MaxIter iterations; 10587 // - It will not fail on the MaxIter'th iteration. 10588 // If the check does fail on the 1st iteration, we leave the loop and no 10589 // other checks matter. 10590 10591 // If there is a loop-invariant, force it into the RHS, otherwise bail out. 10592 if (!isLoopInvariant(RHS, L)) { 10593 if (!isLoopInvariant(LHS, L)) 10594 return None; 10595 10596 std::swap(LHS, RHS); 10597 Pred = ICmpInst::getSwappedPredicate(Pred); 10598 } 10599 10600 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS); 10601 if (!AR || AR->getLoop() != L) 10602 return None; 10603 10604 // The predicate must be relational (i.e. <, <=, >=, >). 10605 if (!ICmpInst::isRelational(Pred)) 10606 return None; 10607 10608 // TODO: Support steps other than +/- 1. 
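  // Worked illustration of the strategy above (hypothetical values, not taken
  // from real IR): for AR = {0,+,1}, Pred = u<, an invariant RHS = %len and
  // MaxIter = %n, the code below computes Last = 0 + %n and tries to prove
  // that the backedge guarantees Last u< %len and that 0 u<= Last (so the IV
  // never wraps). If both hold, the exit condition is equivalent to its
  // first-iteration value 0 u< %len throughout the first %n iterations.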
10609   const SCEV *Step = AR->getStepRecurrence(*this);
10610   auto *One = getOne(Step->getType());
10611   auto *MinusOne = getNegativeSCEV(One);
10612   if (Step != One && Step != MinusOne)
10613     return None;
10614
10615   // A type mismatch here means that MaxIter is potentially larger than the
10616   // max unsigned value in the start type, which means we cannot prove
10617   // no-wrap for the indvar.
10618   if (AR->getType() != MaxIter->getType())
10619     return None;
10620
10621   // Value of IV on suggested last iteration.
10622   const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10623   // Does it still meet the requirement?
10624   if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10625     return None;
10626   // Because the step is +/- 1 and MaxIter has the same type as Start (i.e.
10627   // it does not exceed the max unsigned value of this type), this effectively
10628   // proves that there is no wrap during the iteration. To prove that there
10629   // is no signed/unsigned wrap, we need to check that
10630   // Start <= Last for step = 1 or Start >= Last for step = -1.
10631   ICmpInst::Predicate NoOverflowPred =
10632       CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10633   if (Step == MinusOne)
10634     NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10635   const SCEV *Start = AR->getStart();
10636   if (!isKnownPredicateAt(NoOverflowPred, Start, Last, CtxI))
10637     return None;
10638
10639   // Everything is fine.
10640   return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10641 }
10642
10643 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10644     ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10645   if (HasSameValue(LHS, RHS))
10646     return ICmpInst::isTrueWhenEqual(Pred);
10647
10648   // This code is split out from isKnownPredicate because it is called from
10649   // within isLoopEntryGuardedByCond.
10650
10651   auto CheckRanges = [&](const ConstantRange &RangeLHS,
10652                          const ConstantRange &RangeRHS) {
10653     return RangeLHS.icmp(Pred, RangeRHS);
10654   };
10655
10656   // The check at the top of the function catches the case where the values
10657   // are known to be equal.
10658   if (Pred == CmpInst::ICMP_EQ)
10659     return false;
10660
10661   if (Pred == CmpInst::ICMP_NE) {
10662     auto SL = getSignedRange(LHS);
10663     auto SR = getSignedRange(RHS);
10664     if (CheckRanges(SL, SR))
10665       return true;
10666     auto UL = getUnsignedRange(LHS);
10667     auto UR = getUnsignedRange(RHS);
10668     if (CheckRanges(UL, UR))
10669       return true;
10670     auto *Diff = getMinusSCEV(LHS, RHS);
10671     return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10672   }
10673
10674   if (CmpInst::isSigned(Pred)) {
10675     auto SL = getSignedRange(LHS);
10676     auto SR = getSignedRange(RHS);
10677     return CheckRanges(SL, SR);
10678   }
10679
10680   auto UL = getUnsignedRange(LHS);
10681   auto UR = getUnsignedRange(RHS);
10682   return CheckRanges(UL, UR);
10683 }
10684
10685 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10686                                                     const SCEV *LHS,
10687                                                     const SCEV *RHS) {
10688   // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>,
10689   // where C1 and C2 are constant integers. If either X or Y is not an add
10690   // expression, consider it as X + 0 or Y + 0 respectively. C1 and C2 are
10691   // returned via OutC1 and OutC2.
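  // E.g. (a sketch of the matching below, with made-up operands): for
  // X = (%a + 5)<nsw> and Y = %a, we get OutC1 = 5 and OutC2 = 0, because Y
  // is treated as %a + 0.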
10692 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y, 10693 APInt &OutC1, APInt &OutC2, 10694 SCEV::NoWrapFlags ExpectedFlags) { 10695 const SCEV *XNonConstOp, *XConstOp; 10696 const SCEV *YNonConstOp, *YConstOp; 10697 SCEV::NoWrapFlags XFlagsPresent; 10698 SCEV::NoWrapFlags YFlagsPresent; 10699 10700 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) { 10701 XConstOp = getZero(X->getType()); 10702 XNonConstOp = X; 10703 XFlagsPresent = ExpectedFlags; 10704 } 10705 if (!isa<SCEVConstant>(XConstOp) || 10706 (XFlagsPresent & ExpectedFlags) != ExpectedFlags) 10707 return false; 10708 10709 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) { 10710 YConstOp = getZero(Y->getType()); 10711 YNonConstOp = Y; 10712 YFlagsPresent = ExpectedFlags; 10713 } 10714 10715 if (!isa<SCEVConstant>(YConstOp) || 10716 (YFlagsPresent & ExpectedFlags) != ExpectedFlags) 10717 return false; 10718 10719 if (YNonConstOp != XNonConstOp) 10720 return false; 10721 10722 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt(); 10723 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt(); 10724 10725 return true; 10726 }; 10727 10728 APInt C1; 10729 APInt C2; 10730 10731 switch (Pred) { 10732 default: 10733 break; 10734 10735 case ICmpInst::ICMP_SGE: 10736 std::swap(LHS, RHS); 10737 LLVM_FALLTHROUGH; 10738 case ICmpInst::ICMP_SLE: 10739 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2. 10740 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2)) 10741 return true; 10742 10743 break; 10744 10745 case ICmpInst::ICMP_SGT: 10746 std::swap(LHS, RHS); 10747 LLVM_FALLTHROUGH; 10748 case ICmpInst::ICMP_SLT: 10749 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2. 10750 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2)) 10751 return true; 10752 10753 break; 10754 10755 case ICmpInst::ICMP_UGE: 10756 std::swap(LHS, RHS); 10757 LLVM_FALLTHROUGH; 10758 case ICmpInst::ICMP_ULE: 10759 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2. 10760 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2)) 10761 return true; 10762 10763 break; 10764 10765 case ICmpInst::ICMP_UGT: 10766 std::swap(LHS, RHS); 10767 LLVM_FALLTHROUGH; 10768 case ICmpInst::ICMP_ULT: 10769 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2. 10770 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2)) 10771 return true; 10772 break; 10773 } 10774 10775 return false; 10776 } 10777 10778 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, 10779 const SCEV *LHS, 10780 const SCEV *RHS) { 10781 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate) 10782 return false; 10783 10784 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on 10785 // the stack can result in exponential time complexity. 10786 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true); 10787 10788 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L 10789 // 10790 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use 10791 // isKnownPredicate. isKnownPredicate is more powerful, but also more 10792 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the 10793 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to 10794 // use isKnownPredicate later if needed. 
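  // Illustration with made-up i8 values: for L = 100 (so L s>= 0), I u< 100
  // holds exactly when I s>= 0 and I s< 100; the unsigned values 128..255 are
  // precisely the signed-negative ones that the I >= 0 check rules out.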
10795   return isKnownNonNegative(RHS) &&
10796          isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10797          isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
10798 }
10799
10800 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10801                                         ICmpInst::Predicate Pred,
10802                                         const SCEV *LHS, const SCEV *RHS) {
10803   // No need to even try if we know the module has no guards.
10804   if (!HasGuards)
10805     return false;
10806
10807   return any_of(*BB, [&](const Instruction &I) {
10808     using namespace llvm::PatternMatch;
10809
10810     Value *Condition;
10811     return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10812                          m_Value(Condition))) &&
10813            isImpliedCond(Pred, LHS, RHS, Condition, false);
10814   });
10815 }
10816
10817 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10818 /// protected by a conditional between LHS and RHS. This is used to
10819 /// eliminate casts.
10820 bool
10821 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10822                                              ICmpInst::Predicate Pred,
10823                                              const SCEV *LHS, const SCEV *RHS) {
10824   // Interpret a null as meaning no loop, where there is obviously no guard
10825   // (interprocedural conditions notwithstanding).
10826   if (!L) return true;
10827
10828   if (VerifyIR)
10829     assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10830            "This cannot be done on broken IR!");
10831
10832
10833   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10834     return true;
10835
10836   BasicBlock *Latch = L->getLoopLatch();
10837   if (!Latch)
10838     return false;
10839
10840   BranchInst *LoopContinuePredicate =
10841       dyn_cast<BranchInst>(Latch->getTerminator());
10842   if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10843       isImpliedCond(Pred, LHS, RHS,
10844                     LoopContinuePredicate->getCondition(),
10845                     LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10846     return true;
10847
10848   // We don't want more than one activation of the following loops on the stack
10849   // -- that can lead to O(n!) time complexity.
10850   if (WalkingBEDominatingConds)
10851     return false;
10852
10853   SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10854
10855   // See if we can exploit a trip count to prove the predicate.
10856   const auto &BETakenInfo = getBackedgeTakenInfo(L);
10857   const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10858   if (LatchBECount != getCouldNotCompute()) {
10859     // We know that Latch branches back to the loop header exactly
10860     // LatchBECount times. This means the backedge condition at Latch is
10861     // equivalent to "{0,+,1} u< LatchBECount".
10862     Type *Ty = LatchBECount->getType();
10863     auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10864     const SCEV *LoopCounter =
10865         getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10866     if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10867                       LatchBECount))
10868       return true;
10869   }
10870
10871   // Check conditions due to any @llvm.assume intrinsics.
10872   for (auto &AssumeVH : AC.assumptions()) {
10873     if (!AssumeVH)
10874       continue;
10875     auto *CI = cast<CallInst>(AssumeVH);
10876     if (!DT.dominates(CI, Latch->getTerminator()))
10877       continue;
10878
10879     if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10880       return true;
10881   }
10882
10883   // If the loop is not reachable from the entry block, we risk running into an
10884   // infinite loop as we walk up into the dom tree.
These loops do not matter 10885 // anyway, so we just return a conservative answer when we see them. 10886 if (!DT.isReachableFromEntry(L->getHeader())) 10887 return false; 10888 10889 if (isImpliedViaGuard(Latch, Pred, LHS, RHS)) 10890 return true; 10891 10892 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; 10893 DTN != HeaderDTN; DTN = DTN->getIDom()) { 10894 assert(DTN && "should reach the loop header before reaching the root!"); 10895 10896 BasicBlock *BB = DTN->getBlock(); 10897 if (isImpliedViaGuard(BB, Pred, LHS, RHS)) 10898 return true; 10899 10900 BasicBlock *PBB = BB->getSinglePredecessor(); 10901 if (!PBB) 10902 continue; 10903 10904 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); 10905 if (!ContinuePredicate || !ContinuePredicate->isConditional()) 10906 continue; 10907 10908 Value *Condition = ContinuePredicate->getCondition(); 10909 10910 // If we have an edge `E` within the loop body that dominates the only 10911 // latch, the condition guarding `E` also guards the backedge. This 10912 // reasoning works only for loops with a single latch. 10913 10914 BasicBlockEdge DominatingEdge(PBB, BB); 10915 if (DominatingEdge.isSingleEdge()) { 10916 // We're constructively (and conservatively) enumerating edges within the 10917 // loop body that dominate the latch. The dominator tree better agree 10918 // with us on this: 10919 assert(DT.dominates(DominatingEdge, Latch) && "should be!"); 10920 10921 if (isImpliedCond(Pred, LHS, RHS, Condition, 10922 BB != ContinuePredicate->getSuccessor(0))) 10923 return true; 10924 } 10925 } 10926 10927 return false; 10928 } 10929 10930 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB, 10931 ICmpInst::Predicate Pred, 10932 const SCEV *LHS, 10933 const SCEV *RHS) { 10934 if (VerifyIR) 10935 assert(!verifyFunction(*BB->getParent(), &dbgs()) && 10936 "This cannot be done on broken IR!"); 10937 10938 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove 10939 // the facts (a >= b && a != b) separately. A typical situation is when the 10940 // non-strict comparison is known from ranges and non-equality is known from 10941 // dominating predicates. If we are proving strict comparison, we always try 10942 // to prove non-equality and non-strict comparison separately. 10943 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred); 10944 const bool ProvingStrictComparison = (Pred != NonStrictPredicate); 10945 bool ProvedNonStrictComparison = false; 10946 bool ProvedNonEquality = false; 10947 10948 auto SplitAndProve = 10949 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool { 10950 if (!ProvedNonStrictComparison) 10951 ProvedNonStrictComparison = Fn(NonStrictPredicate); 10952 if (!ProvedNonEquality) 10953 ProvedNonEquality = Fn(ICmpInst::ICMP_NE); 10954 if (ProvedNonStrictComparison && ProvedNonEquality) 10955 return true; 10956 return false; 10957 }; 10958 10959 if (ProvingStrictComparison) { 10960 auto ProofFn = [&](ICmpInst::Predicate P) { 10961 return isKnownViaNonRecursiveReasoning(P, LHS, RHS); 10962 }; 10963 if (SplitAndProve(ProofFn)) 10964 return true; 10965 } 10966 10967 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard. 
10968   auto ProveViaGuard = [&](const BasicBlock *Block) {
10969     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10970       return true;
10971     if (ProvingStrictComparison) {
10972       auto ProofFn = [&](ICmpInst::Predicate P) {
10973         return isImpliedViaGuard(Block, P, LHS, RHS);
10974       };
10975       if (SplitAndProve(ProofFn))
10976         return true;
10977     }
10978     return false;
10979   };
10980
10981   // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10982   auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10983     const Instruction *CtxI = &BB->front();
10984     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, CtxI))
10985       return true;
10986     if (ProvingStrictComparison) {
10987       auto ProofFn = [&](ICmpInst::Predicate P) {
10988         return isImpliedCond(P, LHS, RHS, Condition, Inverse, CtxI);
10989       };
10990       if (SplitAndProve(ProofFn))
10991         return true;
10992     }
10993     return false;
10994   };
10995
10996   // Starting at the block's predecessor, climb up the predecessor chain
10997   // while we can find predecessors that have unique successors leading to
10998   // the original block.
10999   const Loop *ContainingLoop = LI.getLoopFor(BB);
11000   const BasicBlock *PredBB;
11001   if (ContainingLoop && ContainingLoop->getHeader() == BB)
11002     PredBB = ContainingLoop->getLoopPredecessor();
11003   else
11004     PredBB = BB->getSinglePredecessor();
11005   for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11006        Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11007     if (ProveViaGuard(Pair.first))
11008       return true;
11009
11010     const BranchInst *LoopEntryPredicate =
11011         dyn_cast<BranchInst>(Pair.first->getTerminator());
11012     if (!LoopEntryPredicate ||
11013         LoopEntryPredicate->isUnconditional())
11014       continue;
11015
11016     if (ProveViaCond(LoopEntryPredicate->getCondition(),
11017                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
11018       return true;
11019   }
11020
11021   // Check conditions due to any @llvm.assume intrinsics.
11022   for (auto &AssumeVH : AC.assumptions()) {
11023     if (!AssumeVH)
11024       continue;
11025     auto *CI = cast<CallInst>(AssumeVH);
11026     if (!DT.dominates(CI, BB))
11027       continue;
11028
11029     if (ProveViaCond(CI->getArgOperand(0), false))
11030       return true;
11031   }
11032
11033   return false;
11034 }
11035
11036 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
11037                                                ICmpInst::Predicate Pred,
11038                                                const SCEV *LHS,
11039                                                const SCEV *RHS) {
11040   // Interpret a null as meaning no loop, where there is obviously no guard
11041   // (interprocedural conditions notwithstanding).
11042   if (!L)
11043     return false;
11044
11045   // Both LHS and RHS must be available at loop entry.
11046   assert(isAvailableAtLoopEntry(LHS, L) &&
11047          "LHS is not available at Loop Entry");
11048   assert(isAvailableAtLoopEntry(RHS, L) &&
11049          "RHS is not available at Loop Entry");
11050
11051   if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
11052     return true;
11053
11054   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
11055 }
11056
11057 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
11058                                     const SCEV *RHS,
11059                                     const Value *FoundCondValue, bool Inverse,
11060                                     const Instruction *CtxI) {
11061   // A false condition implies anything, so do not bother analyzing it further.
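  // E.g., an edge guarded by `br i1 false` is never taken, so assuming its
  // condition lets us vacuously imply any predicate.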
11062   if (FoundCondValue ==
11063       ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
11064     return true;
11065
11066   if (!PendingLoopPredicates.insert(FoundCondValue).second)
11067     return false;
11068
11069   auto ClearOnExit =
11070       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11071
11072   // Recursively handle And and Or conditions.
11073   const Value *Op0, *Op1;
11074   if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
11075     if (!Inverse)
11076       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
11077              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
11078   } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
11079     if (Inverse)
11080       return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, CtxI) ||
11081              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, CtxI);
11082   }
11083
11084   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
11085   if (!ICI) return false;
11086
11087   // We have now found a conditional branch that dominates the loop or controls
11088   // the loop latch. Check to see if it is the comparison we are looking for.
11089   ICmpInst::Predicate FoundPred;
11090   if (Inverse)
11091     FoundPred = ICI->getInversePredicate();
11092   else
11093     FoundPred = ICI->getPredicate();
11094
11095   const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
11096   const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
11097
11098   return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11099 }
11100
11101 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
11102                                     const SCEV *RHS,
11103                                     ICmpInst::Predicate FoundPred,
11104                                     const SCEV *FoundLHS, const SCEV *FoundRHS,
11105                                     const Instruction *CtxI) {
11106   // Balance the types.
11107   if (getTypeSizeInBits(LHS->getType()) <
11108       getTypeSizeInBits(FoundLHS->getType())) {
11109     // For unsigned and equality predicates, try to prove that both found
11110     // operands fit into a narrow unsigned range. If so, try to prove the
11111     // facts in the narrow types.
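    // For instance (hypothetical widths): with i32 LHS/RHS and i64
    // FoundLHS/FoundRHS, if both found operands are known to be u<=
    // UINT32_MAX, they can be truncated to i32 and the implication attempted
    // entirely in i32.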
11112 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy() && 11113 !FoundRHS->getType()->isPointerTy()) { 11114 auto *NarrowType = LHS->getType(); 11115 auto *WideType = FoundLHS->getType(); 11116 auto BitWidth = getTypeSizeInBits(NarrowType); 11117 const SCEV *MaxValue = getZeroExtendExpr( 11118 getConstant(APInt::getMaxValue(BitWidth)), WideType); 11119 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundLHS, 11120 MaxValue) && 11121 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, FoundRHS, 11122 MaxValue)) { 11123 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType); 11124 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType); 11125 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS, 11126 TruncFoundRHS, CtxI)) 11127 return true; 11128 } 11129 } 11130 11131 if (LHS->getType()->isPointerTy() || RHS->getType()->isPointerTy()) 11132 return false; 11133 if (CmpInst::isSigned(Pred)) { 11134 LHS = getSignExtendExpr(LHS, FoundLHS->getType()); 11135 RHS = getSignExtendExpr(RHS, FoundLHS->getType()); 11136 } else { 11137 LHS = getZeroExtendExpr(LHS, FoundLHS->getType()); 11138 RHS = getZeroExtendExpr(RHS, FoundLHS->getType()); 11139 } 11140 } else if (getTypeSizeInBits(LHS->getType()) > 11141 getTypeSizeInBits(FoundLHS->getType())) { 11142 if (FoundLHS->getType()->isPointerTy() || FoundRHS->getType()->isPointerTy()) 11143 return false; 11144 if (CmpInst::isSigned(FoundPred)) { 11145 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 11146 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 11147 } else { 11148 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 11149 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 11150 } 11151 } 11152 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS, 11153 FoundRHS, CtxI); 11154 } 11155 11156 bool ScalarEvolution::isImpliedCondBalancedTypes( 11157 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11158 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS, 11159 const Instruction *CtxI) { 11160 assert(getTypeSizeInBits(LHS->getType()) == 11161 getTypeSizeInBits(FoundLHS->getType()) && 11162 "Types should be balanced!"); 11163 // Canonicalize the query to match the way instcombine will have 11164 // canonicalized the comparison. 11165 if (SimplifyICmpOperands(Pred, LHS, RHS)) 11166 if (LHS == RHS) 11167 return CmpInst::isTrueWhenEqual(Pred); 11168 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 11169 if (FoundLHS == FoundRHS) 11170 return CmpInst::isFalseWhenEqual(FoundPred); 11171 11172 // Check to see if we can make the LHS or RHS match. 11173 if (LHS == FoundRHS || RHS == FoundLHS) { 11174 if (isa<SCEVConstant>(RHS)) { 11175 std::swap(FoundLHS, FoundRHS); 11176 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 11177 } else { 11178 std::swap(LHS, RHS); 11179 Pred = ICmpInst::getSwappedPredicate(Pred); 11180 } 11181 } 11182 11183 // Check whether the found predicate is the same as the desired predicate. 11184 if (FoundPred == Pred) 11185 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11186 11187 // Check whether swapping the found predicate makes it the same as the 11188 // desired predicate. 11189 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 11190 // We can write the implication 11191 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS 11192 // using one of the following ways: 11193 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS 11194 // 2. 
RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS 11195 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS 11196 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS 11197 // Forms 1. and 2. require swapping the operands of one condition. Don't 11198 // do this if it would break canonical constant/addrec ordering. 11199 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS)) 11200 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS, 11201 CtxI); 11202 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS)) 11203 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI); 11204 11205 // There's no clear preference between forms 3. and 4., try both. Avoid 11206 // forming getNotSCEV of pointer values as the resulting subtract is 11207 // not legal. 11208 if (!LHS->getType()->isPointerTy() && !RHS->getType()->isPointerTy() && 11209 isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS), 11210 FoundLHS, FoundRHS, CtxI)) 11211 return true; 11212 11213 if (!FoundLHS->getType()->isPointerTy() && 11214 !FoundRHS->getType()->isPointerTy() && 11215 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS), 11216 getNotSCEV(FoundRHS), CtxI)) 11217 return true; 11218 11219 return false; 11220 } 11221 11222 auto IsSignFlippedPredicate = [](CmpInst::Predicate P1, 11223 CmpInst::Predicate P2) { 11224 assert(P1 != P2 && "Handled earlier!"); 11225 return CmpInst::isRelational(P2) && 11226 P1 == CmpInst::getFlippedSignednessPredicate(P2); 11227 }; 11228 if (IsSignFlippedPredicate(Pred, FoundPred)) { 11229 // Unsigned comparison is the same as signed comparison when both the 11230 // operands are non-negative or negative. 11231 if ((isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) || 11232 (isKnownNegative(FoundLHS) && isKnownNegative(FoundRHS))) 11233 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI); 11234 // Create local copies that we can freely swap and canonicalize our 11235 // conditions to "le/lt". 11236 ICmpInst::Predicate CanonicalPred = Pred, CanonicalFoundPred = FoundPred; 11237 const SCEV *CanonicalLHS = LHS, *CanonicalRHS = RHS, 11238 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS; 11239 if (ICmpInst::isGT(CanonicalPred) || ICmpInst::isGE(CanonicalPred)) { 11240 CanonicalPred = ICmpInst::getSwappedPredicate(CanonicalPred); 11241 CanonicalFoundPred = ICmpInst::getSwappedPredicate(CanonicalFoundPred); 11242 std::swap(CanonicalLHS, CanonicalRHS); 11243 std::swap(CanonicalFoundLHS, CanonicalFoundRHS); 11244 } 11245 assert((ICmpInst::isLT(CanonicalPred) || ICmpInst::isLE(CanonicalPred)) && 11246 "Must be!"); 11247 assert((ICmpInst::isLT(CanonicalFoundPred) || 11248 ICmpInst::isLE(CanonicalFoundPred)) && 11249 "Must be!"); 11250 if (ICmpInst::isSigned(CanonicalPred) && isKnownNonNegative(CanonicalRHS)) 11251 // Use implication: 11252 // x <u y && y >=s 0 --> x <s y. 11253 // If we can prove the left part, the right part is also proven. 11254 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11255 CanonicalRHS, CanonicalFoundLHS, 11256 CanonicalFoundRHS); 11257 if (ICmpInst::isUnsigned(CanonicalPred) && isKnownNegative(CanonicalRHS)) 11258 // Use implication: 11259 // x <s y && y <s 0 --> x <u y. 11260 // If we can prove the left part, the right part is also proven. 11261 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS, 11262 CanonicalRHS, CanonicalFoundLHS, 11263 CanonicalFoundRHS); 11264 } 11265 11266 // Check if we can make progress by sharpening ranges. 
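  // For example, if the guarding predicate says V != 0 while the known
  // unsigned range of V is [0, 32), the code below reasons with the sharper
  // range [1, 32).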
11267 if (FoundPred == ICmpInst::ICMP_NE && 11268 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) { 11269 11270 const SCEVConstant *C = nullptr; 11271 const SCEV *V = nullptr; 11272 11273 if (isa<SCEVConstant>(FoundLHS)) { 11274 C = cast<SCEVConstant>(FoundLHS); 11275 V = FoundRHS; 11276 } else { 11277 C = cast<SCEVConstant>(FoundRHS); 11278 V = FoundLHS; 11279 } 11280 11281 // The guarding predicate tells us that C != V. If the known range 11282 // of V is [C, t), we can sharpen the range to [C + 1, t). The 11283 // range we consider has to correspond to same signedness as the 11284 // predicate we're interested in folding. 11285 11286 APInt Min = ICmpInst::isSigned(Pred) ? 11287 getSignedRangeMin(V) : getUnsignedRangeMin(V); 11288 11289 if (Min == C->getAPInt()) { 11290 // Given (V >= Min && V != Min) we conclude V >= (Min + 1). 11291 // This is true even if (Min + 1) wraps around -- in case of 11292 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)). 11293 11294 APInt SharperMin = Min + 1; 11295 11296 switch (Pred) { 11297 case ICmpInst::ICMP_SGE: 11298 case ICmpInst::ICMP_UGE: 11299 // We know V `Pred` SharperMin. If this implies LHS `Pred` 11300 // RHS, we're done. 11301 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin), 11302 CtxI)) 11303 return true; 11304 LLVM_FALLTHROUGH; 11305 11306 case ICmpInst::ICMP_SGT: 11307 case ICmpInst::ICMP_UGT: 11308 // We know from the range information that (V `Pred` Min || 11309 // V == Min). We know from the guarding condition that !(V 11310 // == Min). This gives us 11311 // 11312 // V `Pred` Min || V == Min && !(V == Min) 11313 // => V `Pred` Min 11314 // 11315 // If V `Pred` Min implies LHS `Pred` RHS, we're done. 11316 11317 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min), CtxI)) 11318 return true; 11319 break; 11320 11321 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively. 11322 case ICmpInst::ICMP_SLE: 11323 case ICmpInst::ICMP_ULE: 11324 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11325 LHS, V, getConstant(SharperMin), CtxI)) 11326 return true; 11327 LLVM_FALLTHROUGH; 11328 11329 case ICmpInst::ICMP_SLT: 11330 case ICmpInst::ICMP_ULT: 11331 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS, 11332 LHS, V, getConstant(Min), CtxI)) 11333 return true; 11334 break; 11335 11336 default: 11337 // No change 11338 break; 11339 } 11340 } 11341 } 11342 11343 // Check whether the actual condition is beyond sufficient. 11344 if (FoundPred == ICmpInst::ICMP_EQ) 11345 if (ICmpInst::isTrueWhenEqual(Pred)) 11346 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11347 return true; 11348 if (Pred == ICmpInst::ICMP_NE) 11349 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 11350 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI)) 11351 return true; 11352 11353 // Otherwise assume the worst. 
11354 return false; 11355 } 11356 11357 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr, 11358 const SCEV *&L, const SCEV *&R, 11359 SCEV::NoWrapFlags &Flags) { 11360 const auto *AE = dyn_cast<SCEVAddExpr>(Expr); 11361 if (!AE || AE->getNumOperands() != 2) 11362 return false; 11363 11364 L = AE->getOperand(0); 11365 R = AE->getOperand(1); 11366 Flags = AE->getNoWrapFlags(); 11367 return true; 11368 } 11369 11370 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More, 11371 const SCEV *Less) { 11372 // We avoid subtracting expressions here because this function is usually 11373 // fairly deep in the call stack (i.e. is called many times). 11374 11375 // X - X = 0. 11376 if (More == Less) 11377 return APInt(getTypeSizeInBits(More->getType()), 0); 11378 11379 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) { 11380 const auto *LAR = cast<SCEVAddRecExpr>(Less); 11381 const auto *MAR = cast<SCEVAddRecExpr>(More); 11382 11383 if (LAR->getLoop() != MAR->getLoop()) 11384 return None; 11385 11386 // We look at affine expressions only; not for correctness but to keep 11387 // getStepRecurrence cheap. 11388 if (!LAR->isAffine() || !MAR->isAffine()) 11389 return None; 11390 11391 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this)) 11392 return None; 11393 11394 Less = LAR->getStart(); 11395 More = MAR->getStart(); 11396 11397 // fall through 11398 } 11399 11400 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) { 11401 const auto &M = cast<SCEVConstant>(More)->getAPInt(); 11402 const auto &L = cast<SCEVConstant>(Less)->getAPInt(); 11403 return M - L; 11404 } 11405 11406 SCEV::NoWrapFlags Flags; 11407 const SCEV *LLess = nullptr, *RLess = nullptr; 11408 const SCEV *LMore = nullptr, *RMore = nullptr; 11409 const SCEVConstant *C1 = nullptr, *C2 = nullptr; 11410 // Compare (X + C1) vs X. 11411 if (splitBinaryAdd(Less, LLess, RLess, Flags)) 11412 if ((C1 = dyn_cast<SCEVConstant>(LLess))) 11413 if (RLess == More) 11414 return -(C1->getAPInt()); 11415 11416 // Compare X vs (X + C2). 11417 if (splitBinaryAdd(More, LMore, RMore, Flags)) 11418 if ((C2 = dyn_cast<SCEVConstant>(LMore))) 11419 if (RMore == Less) 11420 return C2->getAPInt(); 11421 11422 // Compare (X + C1) vs (X + C2). 11423 if (C1 && C2 && RLess == RMore) 11424 return C2->getAPInt() - C1->getAPInt(); 11425 11426 return None; 11427 } 11428 11429 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart( 11430 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11431 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *CtxI) { 11432 // Try to recognize the following pattern: 11433 // 11434 // FoundRHS = ... 11435 // ... 11436 // loop: 11437 // FoundLHS = {Start,+,W} 11438 // context_bb: // Basic block from the same loop 11439 // known(Pred, FoundLHS, FoundRHS) 11440 // 11441 // If some predicate is known in the context of a loop, it is also known on 11442 // each iteration of this loop, including the first iteration. Therefore, in 11443 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to 11444 // prove the original pred using this fact. 11445 if (!CtxI) 11446 return false; 11447 const BasicBlock *ContextBB = CtxI->getParent(); 11448 // Make sure AR varies in the context block. 11449 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) { 11450 const Loop *L = AR->getLoop(); 11451 // Make sure that context belongs to the loop and executes on 1st iteration 11452 // (if it ever executes at all). 
11453 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11454 return false; 11455 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop())) 11456 return false; 11457 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS); 11458 } 11459 11460 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) { 11461 const Loop *L = AR->getLoop(); 11462 // Make sure that context belongs to the loop and executes on 1st iteration 11463 // (if it ever executes at all). 11464 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch())) 11465 return false; 11466 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop())) 11467 return false; 11468 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart()); 11469 } 11470 11471 return false; 11472 } 11473 11474 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow( 11475 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, 11476 const SCEV *FoundLHS, const SCEV *FoundRHS) { 11477 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT) 11478 return false; 11479 11480 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS); 11481 if (!AddRecLHS) 11482 return false; 11483 11484 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS); 11485 if (!AddRecFoundLHS) 11486 return false; 11487 11488 // We'd like to let SCEV reason about control dependencies, so we constrain 11489 // both the inequalities to be about add recurrences on the same loop. This 11490 // way we can use isLoopEntryGuardedByCond later. 11491 11492 const Loop *L = AddRecFoundLHS->getLoop(); 11493 if (L != AddRecLHS->getLoop()) 11494 return false; 11495 11496 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1) 11497 // 11498 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C) 11499 // ... (2) 11500 // 11501 // Informal proof for (2), assuming (1) [*]: 11502 // 11503 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**] 11504 // 11505 // Then 11506 // 11507 // FoundLHS s< FoundRHS s< INT_MIN - C 11508 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ] 11509 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ] 11510 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s< 11511 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ] 11512 // <=> FoundLHS + C s< FoundRHS + C 11513 // 11514 // [*]: (1) can be proved by ruling out overflow. 11515 // 11516 // [**]: This can be proved by analyzing all the four possibilities: 11517 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and 11518 // (A s>= 0, B s>= 0). 11519 // 11520 // Note: 11521 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C" 11522 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS 11523 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS 11524 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is 11525 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS + 11526 // C)". 
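  // Worked instance of (1) with made-up operands: for Pred = u<, LHS = %x + 2,
  // RHS = %y + 2, FoundLHS = %x and FoundRHS = %y, we get LDiff == RDiff == 2
  // below, and FoundRHSLimit = -2 (i.e. UINT_MAX - 1). If loop entry
  // guarantees %y u< -2, then %y + 2 cannot wrap, and %x u< %y implies
  // %x + 2 u< %y + 2.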
11527
11528   Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
11529   Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
11530   if (!LDiff || !RDiff || *LDiff != *RDiff)
11531     return false;
11532
11533   if (LDiff->isMinValue())
11534     return true;
11535
11536   APInt FoundRHSLimit;
11537
11538   if (Pred == CmpInst::ICMP_ULT) {
11539     FoundRHSLimit = -(*RDiff);
11540   } else {
11541     assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
11542     FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
11543   }
11544
11545   // Try to prove (1) or (2), as needed.
11546   return isAvailableAtLoopEntry(FoundRHS, L) &&
11547          isLoopEntryGuardedByCond(L, Pred, FoundRHS,
11548                                   getConstant(FoundRHSLimit));
11549 }
11550
11551 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
11552                                         const SCEV *LHS, const SCEV *RHS,
11553                                         const SCEV *FoundLHS,
11554                                         const SCEV *FoundRHS, unsigned Depth) {
11555   const PHINode *LPhi = nullptr, *RPhi = nullptr;
11556
11557   auto ClearOnExit = make_scope_exit([&]() {
11558     if (LPhi) {
11559       bool Erased = PendingMerges.erase(LPhi);
11560       assert(Erased && "Failed to erase LPhi!");
11561       (void)Erased;
11562     }
11563     if (RPhi) {
11564       bool Erased = PendingMerges.erase(RPhi);
11565       assert(Erased && "Failed to erase RPhi!");
11566       (void)Erased;
11567     }
11568   });
11569
11570   // Find the respective Phis and check that they are not already pending.
11571   if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
11572     if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11573       if (!PendingMerges.insert(Phi).second)
11574         return false;
11575       LPhi = Phi;
11576     }
11577   if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11578     if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11579       // If we detect a loop of Phi nodes being processed by this method, for
11580       // example:
11581       //
11582       //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11583       //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11584       //
11585       // we don't want to deal with a case that complex, so return the
11586       // conservative answer false.
11587       if (!PendingMerges.insert(Phi).second)
11588         return false;
11589       RPhi = Phi;
11590     }
11591
11592   // If none of LHS, RHS is a Phi, nothing to do here.
11593   if (!LPhi && !RPhi)
11594     return false;
11595
11596   // If there is a SCEVUnknown Phi we are interested in, make it the LHS.
11597   if (!LPhi) {
11598     std::swap(LHS, RHS);
11599     std::swap(FoundLHS, FoundRHS);
11600     std::swap(LPhi, RPhi);
11601     Pred = ICmpInst::getSwappedPredicate(Pred);
11602   }
11603
11604   assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
11605   const BasicBlock *LBB = LPhi->getParent();
11606   const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11607
11608   auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11609     return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11610            isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11611            isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11612   };
11613
11614   if (RPhi && RPhi->getParent() == LBB) {
11615     // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11616     // If we compare two Phis from the same block, and the predicate holds
11617     // for each pair of incoming values from every predecessor block, then
11618     // it also holds for the Phis.
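    // A small illustration (hypothetical IR):
    //   %a = phi i32 [ 0, %entry ], [ %a.next, %latch ]
    //   %b = phi i32 [ 1, %entry ], [ %b.next, %latch ]
    // Proving %a s< %b reduces to proving 0 s< 1 (incoming from %entry) and
    // %a.next s< %b.next (incoming from %latch).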
11619     for (const BasicBlock *IncBB : predecessors(LBB)) {
11620       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11621       const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
11622       if (!ProvedEasily(L, R))
11623         return false;
11624     }
11625   } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
11626     // Case two: RHS is an AddRec for a loop whose header is LPhi's block.
11627     // That loop thus has both an AddRec and an Unknown PHI, so we can compare
11628     // the incoming values of the AddRec from above the loop and from the
11629     // latch with the respective incoming values of LPhi.
11630     // TODO: Generalize to handle loops with many inputs in a header.
11631     if (LPhi->getNumIncomingValues() != 2) return false;
11632
11633     auto *RLoop = RAR->getLoop();
11634     auto *Predecessor = RLoop->getLoopPredecessor();
11635     assert(Predecessor && "Loop with AddRec with no predecessor?");
11636     const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11637     if (!ProvedEasily(L1, RAR->getStart()))
11638       return false;
11639     auto *Latch = RLoop->getLoopLatch();
11640     assert(Latch && "Loop with AddRec with no latch?");
11641     const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11642     if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11643       return false;
11644   } else {
11645     // In all other cases go over the inputs of LHS and compare each of them
11646     // to RHS; the predicate is true for (LHS, RHS) if it is true for all such
11647     // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
11648     // block different from LBB.
11649     for (const BasicBlock *IncBB : predecessors(LBB)) {
11650       // Check that RHS is available in this block.
11651       if (!dominates(RHS, IncBB))
11652         return false;
11653       const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11654       // Make sure L does not refer to a value from a previous iteration of a
11655       // loop.
11656       if (!properlyDominates(L, IncBB))
11657         return false;
11658       if (!ProvedEasily(L, RHS))
11659         return false;
11660     }
11661   }
11662   return true;
11663 }
11664
11665 bool ScalarEvolution::isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred,
11666                                                     const SCEV *LHS,
11667                                                     const SCEV *RHS,
11668                                                     const SCEV *FoundLHS,
11669                                                     const SCEV *FoundRHS) {
11670   // We want to imply LHS < RHS from LHS < (RHS >> shiftvalue). First, make
11671   // sure that we are dealing with the same LHS.
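  // E.g. (made-up values): from LHS u< (%y >> 2) we may conclude LHS u< RHS
  // whenever %y u<= RHS is known, because (%y >> 2) u<= %y u<= RHS.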
11672 if (RHS == FoundRHS) { 11673 std::swap(LHS, RHS); 11674 std::swap(FoundLHS, FoundRHS); 11675 Pred = ICmpInst::getSwappedPredicate(Pred); 11676 } 11677 if (LHS != FoundLHS) 11678 return false; 11679 11680 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS); 11681 if (!SUFoundRHS) 11682 return false; 11683 11684 Value *Shiftee, *ShiftValue; 11685 11686 using namespace PatternMatch; 11687 if (match(SUFoundRHS->getValue(), 11688 m_LShr(m_Value(Shiftee), m_Value(ShiftValue)))) { 11689 auto *ShifteeS = getSCEV(Shiftee); 11690 // Prove one of the following: 11691 // LHS <u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <u RHS 11692 // LHS <=u (shiftee >> shiftvalue) && shiftee <=u RHS ---> LHS <=u RHS 11693 // LHS <s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 11694 // ---> LHS <s RHS 11695 // LHS <=s (shiftee >> shiftvalue) && shiftee <=s RHS && shiftee >=s 0 11696 // ---> LHS <=s RHS 11697 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 11698 return isKnownPredicate(ICmpInst::ICMP_ULE, ShifteeS, RHS); 11699 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 11700 if (isKnownNonNegative(ShifteeS)) 11701 return isKnownPredicate(ICmpInst::ICMP_SLE, ShifteeS, RHS); 11702 } 11703 11704 return false; 11705 } 11706 11707 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 11708 const SCEV *LHS, const SCEV *RHS, 11709 const SCEV *FoundLHS, 11710 const SCEV *FoundRHS, 11711 const Instruction *CtxI) { 11712 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11713 return true; 11714 11715 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11716 return true; 11717 11718 if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS)) 11719 return true; 11720 11721 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS, 11722 CtxI)) 11723 return true; 11724 11725 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 11726 FoundLHS, FoundRHS); 11727 } 11728 11729 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values? 11730 template <typename MinMaxExprType> 11731 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, 11732 const SCEV *Candidate) { 11733 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr); 11734 if (!MinMaxExpr) 11735 return false; 11736 11737 return is_contained(MinMaxExpr->operands(), Candidate); 11738 } 11739 11740 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, 11741 ICmpInst::Predicate Pred, 11742 const SCEV *LHS, const SCEV *RHS) { 11743 // If both sides are affine addrecs for the same loop, with equal 11744 // steps, and we know the recurrences don't wrap, then we only 11745 // need to check the predicate on the starting values. 11746 11747 if (!ICmpInst::isRelational(Pred)) 11748 return false; 11749 11750 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); 11751 if (!LAR) 11752 return false; 11753 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); 11754 if (!RAR) 11755 return false; 11756 if (LAR->getLoop() != RAR->getLoop()) 11757 return false; 11758 if (!LAR->isAffine() || !RAR->isAffine()) 11759 return false; 11760 11761 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE)) 11762 return false; 11763 11764 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ? 
11765       SCEV::FlagNSW : SCEV::FlagNUW;
11766   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
11767     return false;
11768
11769   return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
11770 }
11771
11772 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
11773 /// expression?
11774 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
11775                                         ICmpInst::Predicate Pred,
11776                                         const SCEV *LHS, const SCEV *RHS) {
11777   switch (Pred) {
11778   default:
11779     return false;
11780
11781   case ICmpInst::ICMP_SGE:
11782     std::swap(LHS, RHS);
11783     LLVM_FALLTHROUGH;
11784   case ICmpInst::ICMP_SLE:
11785     return
11786         // min(A, ...) <= A
11787         IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
11788         // A <= max(A, ...)
11789         IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
11790
11791   case ICmpInst::ICMP_UGE:
11792     std::swap(LHS, RHS);
11793     LLVM_FALLTHROUGH;
11794   case ICmpInst::ICMP_ULE:
11795     return
11796         // min(A, ...) <= A
11797         // FIXME: what about umin_seq?
11798         IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
11799         // A <= max(A, ...)
11800         IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
11801   }
11802
11803   llvm_unreachable("covered switch fell through?!");
11804 }
11805
11806 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
11807                                              const SCEV *LHS, const SCEV *RHS,
11808                                              const SCEV *FoundLHS,
11809                                              const SCEV *FoundRHS,
11810                                              unsigned Depth) {
11811   assert(getTypeSizeInBits(LHS->getType()) ==
11812          getTypeSizeInBits(RHS->getType()) &&
11813          "LHS and RHS have different sizes?");
11814   assert(getTypeSizeInBits(FoundLHS->getType()) ==
11815          getTypeSizeInBits(FoundRHS->getType()) &&
11816          "FoundLHS and FoundRHS have different sizes?");
11817   // We want to avoid hurting compile time with analysis of too-large trees.
11818   if (Depth > MaxSCEVOperationsImplicationDepth)
11819     return false;
11820
11821   // We only want to work with GT comparisons so far.
11822   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
11823     Pred = CmpInst::getSwappedPredicate(Pred);
11824     std::swap(LHS, RHS);
11825     std::swap(FoundLHS, FoundRHS);
11826   }
11827
11828   // For unsigned, try to reduce it to the corresponding signed comparison.
11829   if (Pred == ICmpInst::ICMP_UGT)
11830     // We can replace an unsigned predicate with its signed counterpart if all
11831     // involved values are non-negative.
11832     // TODO: We could have better support for unsigned.
11833     if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
11834       // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
11835       // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
11836       // use this fact to prove that LHS and RHS are non-negative.
11837       const SCEV *MinusOne = getMinusOne(LHS->getType());
11838       if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11839                                 FoundRHS) &&
11840           isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11841                                 FoundRHS))
11842         Pred = ICmpInst::ICMP_SGT;
11843     }
11844
11845   if (Pred != ICmpInst::ICMP_SGT)
11846     return false;
11847
11848   auto GetOpFromSExt = [&](const SCEV *S) {
11849     if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11850       return Ext->getOperand();
11851     // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11852     // the constant in some cases.
11853     return S;
11854   };
11855
11856   // Acquire values from extensions.
11857   auto *OrigLHS = LHS;
11858   auto *OrigFoundLHS = FoundLHS;
11859   LHS = GetOpFromSExt(LHS);
11860   FoundLHS = GetOpFromSExt(FoundLHS);
11861
11862   // Whether the SGT predicate can be proved trivially or using the found context.
11863   auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11864     return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11865            isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11866                                   FoundRHS, Depth + 1);
11867   };
11868
11869   if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11870     // We want to avoid creation of any new non-constant SCEV. Since we are
11871     // going to compare the operands to RHS, we should be certain that we don't
11872     // need any size extensions for this. So let's decline all cases when the
11873     // sizes of types of LHS and RHS do not match.
11874     // TODO: Maybe try to get RHS from sext to catch more cases?
11875     if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11876       return false;
11877
11878     // Should not overflow.
11879     if (!LHSAddExpr->hasNoSignedWrap())
11880       return false;
11881
11882     auto *LL = LHSAddExpr->getOperand(0);
11883     auto *LR = LHSAddExpr->getOperand(1);
11884     auto *MinusOne = getMinusOne(RHS->getType());
11885
11886     // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11887     auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11888       return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11889     };
11890     // Try to prove the following rule:
11891     // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11892     // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
11893     if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11894       return true;
11895   } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11896     Value *LL, *LR;
11897     // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11898
11899     using namespace llvm::PatternMatch;
11900
11901     if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11902       // Rules for division.
11903       // We are going to perform some comparisons with Denominator and its
11904       // derivative expressions. In the general case, creating a SCEV for it
11905       // may lead to a complex analysis of the entire graph, and in particular
11906       // it can request trip count recalculation for the same loop, which would
11907       // be cached as SCEVCouldNotCompute to avoid infinite recursion. To avoid
11908       // this, we only want to create SCEVs that are constants in this section.
11909       // So we bail if Denominator is not a constant.
11910       if (!isa<ConstantInt>(LR))
11911         return false;
11912
11913       auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11914
11915       // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
11916       // then a SCEV for the numerator already exists and matches FoundLHS.
11917       auto *Numerator = getExistingSCEV(LL);
11918       if (!Numerator || Numerator->getType() != FoundLHS->getType())
11919         return false;
11920
11921       // Make sure that the numerator matches FoundLHS and the denominator is
11922       // positive.
11923       if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11924         return false;
11925
11926       auto *DTy = Denominator->getType();
11927       auto *FRHSTy = FoundRHS->getType();
11928       if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11929         // One of the types is a pointer and the other one is not. We cannot
11930         // extend them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc. should
        // help to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      //   (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
      // divide it by a Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      //   (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3, FoundLHS is at least -2. If we
      // divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getMinusOne(WTy);
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}

static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // zext x u<= sext x, sext x s<= zext x
  switch (Pred) {
  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE: {
    // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
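    // Worked instance (illustrative): for an i8 operand x extended to i32,
    // x = -1 gives (zext x) = 255 but (sext x) = 0xFFFFFFFF, so
    // zext x <u sext x; for x >=s 0 the two extensions agree. Either way,
    // "zext x u<= sext x" holds.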
    const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
    const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
    if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
      return true;
    break;
  }
  default:
    break;
  }
  return false;
}

bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS, const SCEV *RHS) {
  return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
         isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}

bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}

bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
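  // Worked example (illustrative): from the antecedent "FoundLHS <u 8" we get
  // FoundLHSRange = [0, 8); with Addend = 2 this gives LHSRange = [2, 10), so
  // a consequent such as "LHS <u 10" is implied, since every value in [2, 10)
  // satisfies it.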
12079 ConstantRange FoundLHSRange = 12080 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS); 12081 12082 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`: 12083 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend)); 12084 12085 // We can also compute the range of values for `LHS` that satisfy the 12086 // consequent, "`LHS` `Pred` `RHS`": 12087 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt(); 12088 // The antecedent implies the consequent if every value of `LHS` that 12089 // satisfies the antecedent also satisfies the consequent. 12090 return LHSRange.icmp(Pred, ConstRHS); 12091 } 12092 12093 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, 12094 bool IsSigned) { 12095 assert(isKnownPositive(Stride) && "Positive stride expected!"); 12096 12097 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12098 const SCEV *One = getOne(Stride->getType()); 12099 12100 if (IsSigned) { 12101 APInt MaxRHS = getSignedRangeMax(RHS); 12102 APInt MaxValue = APInt::getSignedMaxValue(BitWidth); 12103 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12104 12105 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow! 12106 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS); 12107 } 12108 12109 APInt MaxRHS = getUnsignedRangeMax(RHS); 12110 APInt MaxValue = APInt::getMaxValue(BitWidth); 12111 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12112 12113 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow! 12114 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS); 12115 } 12116 12117 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, 12118 bool IsSigned) { 12119 12120 unsigned BitWidth = getTypeSizeInBits(RHS->getType()); 12121 const SCEV *One = getOne(Stride->getType()); 12122 12123 if (IsSigned) { 12124 APInt MinRHS = getSignedRangeMin(RHS); 12125 APInt MinValue = APInt::getSignedMinValue(BitWidth); 12126 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One)); 12127 12128 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow! 12129 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS); 12130 } 12131 12132 APInt MinRHS = getUnsignedRangeMin(RHS); 12133 APInt MinValue = APInt::getMinValue(BitWidth); 12134 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One)); 12135 12136 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow! 12137 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS); 12138 } 12139 12140 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) { 12141 // umin(N, 1) + floor((N - umin(N, 1)) / D) 12142 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin 12143 // expression fixes the case of N=0. 12144 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType())); 12145 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne); 12146 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D)); 12147 } 12148 12149 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start, 12150 const SCEV *Stride, 12151 const SCEV *End, 12152 unsigned BitWidth, 12153 bool IsSigned) { 12154 // The logic in this function assumes we can represent a positive stride. 12155 // If we can't, the backedge-taken count must be zero. 
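  // A worked instance of the computation below (illustrative): for an
  // unsigned i8 IV with Start in [10, 20], Stride in [2, 4] and End at most
  // 100, we get MinStart = 10, StrideForMaxBECount = 2, MaxEnd = 100, and a
  // result of ceil((100 - 10) / 2) = 45.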
  if (IsSigned && BitWidth == 1)
    return getZero(Stride->getType());

  // This code has only been closely audited for negative strides in the
  // unsigned comparison case; it may be correct for signed comparison, but
  // that needs to be established.
  assert((!IsSigned || !isKnownNonPositive(Stride)) &&
         "Stride is expected strictly positive for signed case!");

  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt MinStride =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We assume either the stride is positive, or the backedge-taken count
  // is zero. So force StrideForMaxBECount to be at least one.
  APInt One(BitWidth, 1);
  APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
                                       : APIntOps::umax(One, MinStride);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression, we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge-taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
  MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
                    : APIntOps::umax(MaxEnd, MinStart);

  return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
                         getConstant(StrideForMaxBECount) /* Step */);
}

ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  auto canAssumeNoSelfWrap = [&](const SCEVAddRecExpr *AR) {
    // Can we prove this loop *must* be UB if overflow of the IV occurs?
    // The reasoning goes as follows:
    // * Suppose the IV did self wrap.
    // * If Stride evenly divides the iteration space, then once wrap
    //   occurs, the loop must revisit the same values.
    // * We know that RHS is invariant, and that none of those values
    //   caused this exit to be taken previously. Thus, this exit is
    //   dynamically dead.
    // * If this is the sole exit, then a dead exit implies the loop
    //   must be infinite if there are no abnormal exits.
    // * If the loop were infinite, then it must either not be mustprogress
    //   or have side effects. Otherwise, it must be UB.
    // * It can't (by assumption) be UB, so we have contradicted our
    //   premise and can conclude the IV did not in fact self-wrap.
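    // Concrete illustration: an i8 IV stepping by 4 (a power of two that
    // evenly divides the 256-value iteration space) revisits exactly the same
    // 64 values after a wrap, so if no earlier visit took this exit, no later
    // one can either.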
12222 if (!isLoopInvariant(RHS, L)) 12223 return false; 12224 12225 auto *StrideC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)); 12226 if (!StrideC || !StrideC->getAPInt().isPowerOf2()) 12227 return false; 12228 12229 if (!ControlsExit || !loopHasNoAbnormalExits(L)) 12230 return false; 12231 12232 return loopIsFiniteByAssumption(L); 12233 }; 12234 12235 if (!IV) { 12236 if (auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) { 12237 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand()); 12238 if (AR && AR->getLoop() == L && AR->isAffine()) { 12239 auto canProveNUW = [&]() { 12240 if (!isLoopInvariant(RHS, L)) 12241 return false; 12242 12243 if (!isKnownNonZero(AR->getStepRecurrence(*this))) 12244 // We need the sequence defined by AR to strictly increase in the 12245 // unsigned integer domain for the logic below to hold. 12246 return false; 12247 12248 const unsigned InnerBitWidth = getTypeSizeInBits(AR->getType()); 12249 const unsigned OuterBitWidth = getTypeSizeInBits(RHS->getType()); 12250 // If RHS <=u Limit, then there must exist a value V in the sequence 12251 // defined by AR (e.g. {Start,+,Step}) such that V >u RHS, and 12252 // V <=u UINT_MAX. Thus, we must exit the loop before unsigned 12253 // overflow occurs. This limit also implies that a signed comparison 12254 // (in the wide bitwidth) is equivalent to an unsigned comparison as 12255 // the high bits on both sides must be zero. 12256 APInt StrideMax = getUnsignedRangeMax(AR->getStepRecurrence(*this)); 12257 APInt Limit = APInt::getMaxValue(InnerBitWidth) - (StrideMax - 1); 12258 Limit = Limit.zext(OuterBitWidth); 12259 return getUnsignedRangeMax(applyLoopGuards(RHS, L)).ule(Limit); 12260 }; 12261 auto Flags = AR->getNoWrapFlags(); 12262 if (!hasFlags(Flags, SCEV::FlagNUW) && canProveNUW()) 12263 Flags = setFlags(Flags, SCEV::FlagNUW); 12264 12265 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), Flags); 12266 if (AR->hasNoUnsignedWrap()) { 12267 // Emulate what getZeroExtendExpr would have done during construction 12268 // if we'd been able to infer the fact just above at that time. 12269 const SCEV *Step = AR->getStepRecurrence(*this); 12270 Type *Ty = ZExt->getType(); 12271 auto *S = getAddRecExpr( 12272 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, 0), 12273 getZeroExtendExpr(Step, Ty, 0), L, AR->getNoWrapFlags()); 12274 IV = dyn_cast<SCEVAddRecExpr>(S); 12275 } 12276 } 12277 } 12278 } 12279 12280 12281 if (!IV && AllowPredicates) { 12282 // Try to make this an AddRec using runtime tests, in the first X 12283 // iterations of this loop, where X is the SCEV expression found by the 12284 // algorithm below. 12285 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 12286 PredicatedIV = true; 12287 } 12288 12289 // Avoid weird loops 12290 if (!IV || IV->getLoop() != L || !IV->isAffine()) 12291 return getCouldNotCompute(); 12292 12293 // A precondition of this method is that the condition being analyzed 12294 // reaches an exiting branch which dominates the latch. Given that, we can 12295 // assume that an increment which violates the nowrap specification and 12296 // produces poison must cause undefined behavior when the resulting poison 12297 // value is branched upon and thus we can conclude that the backedge is 12298 // taken no more often than would be required to produce that poison value. 
  // Note that a well-defined loop can exit on the iteration which violates
  // the nowrap specification if there is another exit (either explicit or
  // implicit/exceptional) which causes the loop to execute before the
  // exiting instruction we're analyzing would trigger UB.
  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is guaranteed to be finite (e.g. is mustprogress and has
    //    no side effects within the loop)
    // c) loop has a single static exit (with no abnormal exits)
    //
    // Precondition a) implies that if the stride is negative, this is a
    // single trip loop. The backedge taken count formula reduces to zero in
    // this case.
    //
    // Preconditions b) and c) combine to imply that if RHS is invariant in L,
    // then a zero stride means the backedge can't be taken without executing
    // undefined behavior.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (the original behavior of the function).
    //
    if (PredicatedIV || !NoWrap || !loopIsFiniteByAssumption(L) ||
        !loopHasNoAbnormalExits(L))
      return getCouldNotCompute();

    // This bailout is protecting the logic in computeMaxBECountForLT which
    // has not yet been sufficiently audited or tested with negative strides.
    // We used to filter out all known-non-positive cases here; we're in the
    // process of becoming less restrictive bit by bit.
    if (IsSigned && isKnownNonPositive(Stride))
      return getCouldNotCompute();

    if (!isKnownNonZero(Stride)) {
      // If we have a step of zero, and RHS isn't invariant in L, we don't
      // know if it might eventually be greater than start and if so, on
      // which iteration. We can't even produce a useful upper bound.
      if (!isLoopInvariant(RHS, L))
        return getCouldNotCompute();

      // We allow a potentially zero stride, but we need to divide by stride
      // below. Since the loop can't be infinite and this check must control
      // the sole exit, we can infer the exit must be taken on the first
      // iteration (e.g. backedge count = 0) if the stride is zero. Given
      // that, we know the numerator in the divides below must be zero, so we
      // can pick an arbitrary non-zero value for the denominator (e.g.
      // stride) and produce the right result.
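      // E.g. (illustrative): if Stride may be 0 at runtime, the argument
      // above forces the exit on the first iteration, so the numerator is
      // zero and 0 /u umax(Stride, 1) = 0 yields the correct count no matter
      // what Stride was.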
      // FIXME: Handle the case where Stride is poison?
      auto wouldZeroStrideBeUB = [&]() {
        // Proof by contradiction. Suppose the stride were zero. If we can
        // prove that the backedge *is* taken on the first iteration, then
        // since we know this condition controls the sole exit, we must have
        // an infinite loop. We can't have a (well-defined) infinite loop per
        // the check just above.
        // Note: The (Start - Stride) term is used to get the start' term from
        // (start' + stride,+,stride). Remember that we only care about the
        // result of this expression when stride == 0 at runtime.
        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
      };
      if (!wouldZeroStrideBeUB()) {
        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
      }
    }
  } else if (!Stride->isOne() && !NoWrap) {
    auto isUBOnWrap = [&]() {
      // From no-self-wrap, we need to then prove no-(un)signed-wrap. This
      // follows trivially from the fact that every (un)signed-wrapped, but
      // not self-wrapped, value must be less than the last value before the
      // (un)signed wrap. Since we know that last value didn't cause an exit,
      // neither will any smaller one.
      return canAssumeNoSelfWrap(IV);
    };

    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing optimization in the presence
    // of undefined behavior, as in the case of the C language.
    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
      return getCouldNotCompute();
  }

  // On all paths just preceding, we established the following invariant:
  //   IV can be assumed not to overflow up to and including the exiting
  //   iteration. We proved this in one of two ways:
  //   1) We can show overflow doesn't occur before the exiting iteration
  //      1a) canIVOverflowOnLT, and 1b) a step of one
  //   2) We can show that if overflow occurs, the loop must execute UB
  //      before any possible exit.
  // Note that we have not yet proved RHS invariant (in general).

  const SCEV *Start = IV->getStart();

  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
  // If we convert to integers, isLoopEntryGuardedByCond will miss some cases.
  // Use integer-typed versions for actual computation; we can't subtract
  // pointers in general.
  const SCEV *OrigStart = Start;
  const SCEV *OrigRHS = RHS;
  if (Start->getType()->isPointerTy()) {
    Start = getLosslessPtrToIntExpr(Start);
    if (isa<SCEVCouldNotCompute>(Start))
      return Start;
  }
  if (RHS->getType()->isPointerTy()) {
    RHS = getLosslessPtrToIntExpr(RHS);
    if (isa<SCEVCouldNotCompute>(RHS))
      return RHS;
  }

  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we
  // can calculate the MaxBECount, given the start, stride and max value for
  // the end bound of the loop (RHS), and the fact that the IV does not
  // overflow (which is checked above).
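  // For instance (illustrative): with Start = 0, Stride = 2 and a varying RHS
  // whose unsigned range is known to be [0, 100], the exact count is unknown,
  // but the max backedge-taken count is still bounded by
  // ceil((100 - 0) / 2) = 50.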
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }

  // We use the expression (max(End,Start)-Start)/Stride to describe the
  // backedge count: if the backedge is taken at least once, max(End,Start)
  // is End and the result is as above; if not, max(End,Start) is Start and
  // we get a backedge count of zero.
  const SCEV *BECount = nullptr;
  auto *OrigStartMinusStride = getMinusSCEV(OrigStart, Stride);
  assert(isAvailableAtLoopEntry(OrigStartMinusStride, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigStart, L) && "Must be!");
  assert(isAvailableAtLoopEntry(OrigRHS, L) && "Must be!");
  // Can we prove max(RHS,Start) > Start - Stride?
  if (isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigStart) &&
      isLoopEntryGuardedByCond(L, Cond, OrigStartMinusStride, OrigRHS)) {
    // In this case, we can use a refined formula for computing backedge
    // taken count. The general formula remains:
    //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
    // We want to use the alternate formula:
    //   "((End - 1) - (Start - Stride)) /u Stride"
    // Let's do a quick case analysis to show these are equivalent under
    // our precondition that max(RHS,Start) > Start - Stride.
    // * For RHS <= Start, the backedge-taken count must be zero.
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((Start - 1) - (Start - Stride)) /u Stride" which simplifies to
    //   "(Stride - 1) /u Stride", which is indeed zero for all non-zero
    //   values of Stride. For a zero stride, we've used umax(Stride, 1)
    //   above, reducing this to the stride-of-1 case.
    // * For RHS >= Start, the backedge count must be
    //   "RHS-Start /uceil Stride".
    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
    //   "((RHS - 1) - (Start - Stride)) /u Stride" which reassociates to
    //   "(RHS - (Start - Stride) - 1) /u Stride".
    //   Our preconditions trivially imply no overflow in that form.
    // Quick numeric check: Start = 0, RHS = 10, Stride = 3 visits
    // 0, 3, 6, 9, 12, i.e. 4 backedges, and
    // ((10 - 1) - (0 - 3)) /u 3 = 12 /u 3 = 4.
    const SCEV *MinusOne = getMinusOne(Stride->getType());
    const SCEV *Numerator =
        getMinusSCEV(getAddExpr(RHS, MinusOne), getMinusSCEV(Start, Stride));
    BECount = getUDivExpr(Numerator, Stride);
  }

  const SCEV *BECountIfBackedgeTaken = nullptr;
  if (!BECount) {
    auto canProveRHSGreaterThanEqualStart = [&]() {
      auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
        return true;

      // (RHS > Start - 1) implies RHS >= Start.
      // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
      //   "Start - 1" doesn't overflow.
      // * For signed comparison, if Start - 1 does overflow, it's equal
      //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
      // * For unsigned comparison, if Start - 1 does overflow, it's equal
      //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
      //
      // FIXME: Should isLoopEntryGuardedByCond do this for us?
      auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      auto *StartMinusOne = getAddExpr(OrigStart,
                                       getMinusOne(OrigStart->getType()));
      return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
    };

    // If we know that RHS >= Start in the context of the loop, then we know
    // that max(RHS, Start) = RHS at this point.
    const SCEV *End;
    if (canProveRHSGreaterThanEqualStart()) {
      End = RHS;
    } else {
      // If RHS < Start, the backedge will be taken zero times. So in
      // general, we can write the backedge-taken count as:
      //
      //     RHS >= Start ? ceil(RHS - Start) / Stride : 0
      //
      // We convert it to the following to make it more convenient for SCEV:
      //
      //     ceil(max(RHS, Start) - Start) / Stride
      End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);

      // See what would happen if we assume the backedge is taken. This is
      // used to compute MaxBECount.
      BECountIfBackedgeTaken =
          getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
    }

    // At this point, we know:
    //
    // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
    // 2. The index variable doesn't overflow.
    //
    // Therefore, we know N exists such that
    // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
    // doesn't overflow.
    //
    // Using this information, try to prove whether the addition in
    // "(End - Start) + (Stride - 1)" has unsigned overflow.
    const SCEV *One = getOne(Stride->getType());
    bool MayAddOverflow = [&] {
      if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
        if (StrideC->getAPInt().isPowerOf2()) {
          // Suppose Stride is a power of two, and Start/End are unsigned
          // integers. Let UMAX be the largest representable unsigned
          // integer.
          //
          // By the preconditions of this function, we know
          // "(Start + Stride * N) >= End", and this doesn't overflow.
          // As a formula:
          //
          //   End <= (Start + Stride * N) <= UMAX
          //
          // Subtracting Start from all the terms:
          //
          //   End - Start <= Stride * N <= UMAX - Start
          //
          // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
          //
          //   End - Start <= Stride * N <= UMAX
          //
          // Stride * N is a multiple of Stride. Therefore,
          //
          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
          //
          // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
          // Therefore, UMAX mod Stride == Stride - 1. So we can write:
          //
          //   End - Start <= Stride * N <= UMAX - Stride + 1
          //
          // Dropping the middle term:
          //
          //   End - Start <= UMAX - Stride + 1
          //
          // Adding Stride - 1 to both sides:
          //
          //   (End - Start) + (Stride - 1) <= UMAX
          //
          // In other words, the addition doesn't have unsigned overflow.
          //
          // A similar proof works if we treat Start/End as signed values.
          // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
          // use signed max instead of unsigned max. Note that we're trying
          // to prove a lack of unsigned overflow in either case.
          return false;
        }
      }
      if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
        // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
        // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
12586 // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End. 12587 // 12588 // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End. 12589 return false; 12590 } 12591 return true; 12592 }(); 12593 12594 const SCEV *Delta = getMinusSCEV(End, Start); 12595 if (!MayAddOverflow) { 12596 // floor((D + (S - 1)) / S) 12597 // We prefer this formulation if it's legal because it's fewer operations. 12598 BECount = 12599 getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride); 12600 } else { 12601 BECount = getUDivCeilSCEV(Delta, Stride); 12602 } 12603 } 12604 12605 const SCEV *MaxBECount; 12606 bool MaxOrZero = false; 12607 if (isa<SCEVConstant>(BECount)) { 12608 MaxBECount = BECount; 12609 } else if (BECountIfBackedgeTaken && 12610 isa<SCEVConstant>(BECountIfBackedgeTaken)) { 12611 // If we know exactly how many times the backedge will be taken if it's 12612 // taken at least once, then the backedge count will either be that or 12613 // zero. 12614 MaxBECount = BECountIfBackedgeTaken; 12615 MaxOrZero = true; 12616 } else { 12617 MaxBECount = computeMaxBECountForLT( 12618 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned); 12619 } 12620 12621 if (isa<SCEVCouldNotCompute>(MaxBECount) && 12622 !isa<SCEVCouldNotCompute>(BECount)) 12623 MaxBECount = getConstant(getUnsignedRangeMax(BECount)); 12624 12625 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates); 12626 } 12627 12628 ScalarEvolution::ExitLimit 12629 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, 12630 const Loop *L, bool IsSigned, 12631 bool ControlsExit, bool AllowPredicates) { 12632 SmallPtrSet<const SCEVPredicate *, 4> Predicates; 12633 // We handle only IV > Invariant 12634 if (!isLoopInvariant(RHS, L)) 12635 return getCouldNotCompute(); 12636 12637 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); 12638 if (!IV && AllowPredicates) 12639 // Try to make this an AddRec using runtime tests, in the first X 12640 // iterations of this loop, where X is the SCEV expression found by the 12641 // algorithm below. 12642 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates); 12643 12644 // Avoid weird loops 12645 if (!IV || IV->getLoop() != L || !IV->isAffine()) 12646 return getCouldNotCompute(); 12647 12648 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW; 12649 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType); 12650 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 12651 12652 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this)); 12653 12654 // Avoid negative or zero stride values 12655 if (!isKnownPositive(Stride)) 12656 return getCouldNotCompute(); 12657 12658 // Avoid proven overflow cases: this will ensure that the backedge taken count 12659 // will not generate any unsigned overflow. Relaxed no-overflow conditions 12660 // exploit NoWrapFlags, allowing to optimize in presence of undefined 12661 // behaviors like the case of C language. 12662 if (!Stride->isOne() && !NoWrap) 12663 if (canIVOverflowOnGT(RHS, Stride, IsSigned)) 12664 return getCouldNotCompute(); 12665 12666 const SCEV *Start = IV->getStart(); 12667 const SCEV *End = RHS; 12668 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) { 12669 // If we know that Start >= RHS in the context of loop, then we know that 12670 // min(RHS, Start) = RHS at this point. 12671 if (isLoopEntryGuardedByCond( 12672 L, IsSigned ? 
ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS)) 12673 End = RHS; 12674 else 12675 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start); 12676 } 12677 12678 if (Start->getType()->isPointerTy()) { 12679 Start = getLosslessPtrToIntExpr(Start); 12680 if (isa<SCEVCouldNotCompute>(Start)) 12681 return Start; 12682 } 12683 if (End->getType()->isPointerTy()) { 12684 End = getLosslessPtrToIntExpr(End); 12685 if (isa<SCEVCouldNotCompute>(End)) 12686 return End; 12687 } 12688 12689 // Compute ((Start - End) + (Stride - 1)) / Stride. 12690 // FIXME: This can overflow. Holding off on fixing this for now; 12691 // howManyGreaterThans will hopefully be gone soon. 12692 const SCEV *One = getOne(Stride->getType()); 12693 const SCEV *BECount = getUDivExpr( 12694 getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride); 12695 12696 APInt MaxStart = IsSigned ? getSignedRangeMax(Start) 12697 : getUnsignedRangeMax(Start); 12698 12699 APInt MinStride = IsSigned ? getSignedRangeMin(Stride) 12700 : getUnsignedRangeMin(Stride); 12701 12702 unsigned BitWidth = getTypeSizeInBits(LHS->getType()); 12703 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1) 12704 : APInt::getMinValue(BitWidth) + (MinStride - 1); 12705 12706 // Although End can be a MIN expression we estimate MinEnd considering only 12707 // the case End = RHS. This is safe because in the other case (Start - End) 12708 // is zero, leading to a zero maximum backedge taken count. 12709 APInt MinEnd = 12710 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit) 12711 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit); 12712 12713 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) 12714 ? BECount 12715 : getUDivCeilSCEV(getConstant(MaxStart - MinEnd), 12716 getConstant(MinStride)); 12717 12718 if (isa<SCEVCouldNotCompute>(MaxBECount)) 12719 MaxBECount = BECount; 12720 12721 return ExitLimit(BECount, MaxBECount, false, Predicates); 12722 } 12723 12724 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range, 12725 ScalarEvolution &SE) const { 12726 if (Range.isFullSet()) // Infinite loop. 12727 return SE.getCouldNotCompute(); 12728 12729 // If the start is a non-zero constant, shift the range to simplify things. 12730 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 12731 if (!SC->getValue()->isZero()) { 12732 SmallVector<const SCEV *, 4> Operands(operands()); 12733 Operands[0] = SE.getZero(SC->getType()); 12734 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(), 12735 getNoWrapFlags(FlagNW)); 12736 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted)) 12737 return ShiftedAddRec->getNumIterationsInRange( 12738 Range.subtract(SC->getAPInt()), SE); 12739 // This is strange and shouldn't happen. 12740 return SE.getCouldNotCompute(); 12741 } 12742 12743 // The only time we can solve this is when we have all constant indices. 12744 // Otherwise, we cannot determine the overflow conditions. 12745 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); })) 12746 return SE.getCouldNotCompute(); 12747 12748 // Okay at this point we know that all elements of the chrec are constants and 12749 // that the start element is zero. 12750 12751 // First check to see if the range contains zero. If not, the first 12752 // iteration exits. 
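  // Worked example (illustrative): {0,+,2} against the range [0, 10) takes
  // the values 0, 2, 4, 6, 8, 10, ...; with A = 2 and End = 9 the exit value
  // is (9 + 2) /u 2 = 5, and indeed iteration 5 yields 10, the first value
  // outside the range.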
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range === Ax in Range

    // We know that zero is in the range. If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value. Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value. If we really did fall out of the valid
    // range, then we computed our trip count; otherwise wraparound or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute(); // Something strange happened

    // Ensure that the previous value is in the range.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
               ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}

const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach an arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}

// Return true when S contains at least one undef value.
12821 bool ScalarEvolution::containsUndefs(const SCEV *S) const { 12822 return SCEVExprContains(S, [](const SCEV *S) { 12823 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 12824 return isa<UndefValue>(SU->getValue()); 12825 return false; 12826 }); 12827 } 12828 12829 // Return true when S contains a value that is a nullptr. 12830 bool ScalarEvolution::containsErasedValue(const SCEV *S) const { 12831 return SCEVExprContains(S, [](const SCEV *S) { 12832 if (const auto *SU = dyn_cast<SCEVUnknown>(S)) 12833 return SU->getValue() == nullptr; 12834 return false; 12835 }); 12836 } 12837 12838 /// Return the size of an element read or written by Inst. 12839 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) { 12840 Type *Ty; 12841 if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) 12842 Ty = Store->getValueOperand()->getType(); 12843 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) 12844 Ty = Load->getType(); 12845 else 12846 return nullptr; 12847 12848 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty)); 12849 return getSizeOfExpr(ETy, Ty); 12850 } 12851 12852 //===----------------------------------------------------------------------===// 12853 // SCEVCallbackVH Class Implementation 12854 //===----------------------------------------------------------------------===// 12855 12856 void ScalarEvolution::SCEVCallbackVH::deleted() { 12857 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12858 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 12859 SE->ConstantEvolutionLoopExitValue.erase(PN); 12860 SE->eraseValueFromMap(getValPtr()); 12861 // this now dangles! 12862 } 12863 12864 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 12865 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 12866 12867 // Forget all the expressions associated with users of the old value, 12868 // so that future queries will recompute the expressions using the new 12869 // value. 12870 Value *Old = getValPtr(); 12871 SmallVector<User *, 16> Worklist(Old->users()); 12872 SmallPtrSet<User *, 8> Visited; 12873 while (!Worklist.empty()) { 12874 User *U = Worklist.pop_back_val(); 12875 // Deleting the Old value will cause this to dangle. Postpone 12876 // that until everything else is done. 12877 if (U == Old) 12878 continue; 12879 if (!Visited.insert(U).second) 12880 continue; 12881 if (PHINode *PN = dyn_cast<PHINode>(U)) 12882 SE->ConstantEvolutionLoopExitValue.erase(PN); 12883 SE->eraseValueFromMap(U); 12884 llvm::append_range(Worklist, U->users()); 12885 } 12886 // Delete the Old value. 12887 if (PHINode *PN = dyn_cast<PHINode>(Old)) 12888 SE->ConstantEvolutionLoopExitValue.erase(PN); 12889 SE->eraseValueFromMap(Old); 12890 // this now dangles! 12891 } 12892 12893 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 12894 : CallbackVH(V), SE(se) {} 12895 12896 //===----------------------------------------------------------------------===// 12897 // ScalarEvolution Class Implementation 12898 //===----------------------------------------------------------------------===// 12899 12900 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, 12901 AssumptionCache &AC, DominatorTree &DT, 12902 LoopInfo &LI) 12903 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), 12904 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64), 12905 LoopDispositions(64), BlockDispositions(64) { 12906 // To use guards for proving predicates, we need to scan every instruction in 12907 // relevant basic blocks, and not just terminators. 
Doing this is a waste of 12908 // time if the IR does not actually contain any calls to 12909 // @llvm.experimental.guard, so do a quick check and remember this beforehand. 12910 // 12911 // This pessimizes the case where a pass that preserves ScalarEvolution wants 12912 // to _add_ guards to the module when there weren't any before, and wants 12913 // ScalarEvolution to optimize based on those guards. For now we prefer to be 12914 // efficient in lieu of being smart in that rather obscure case. 12915 12916 auto *GuardDecl = F.getParent()->getFunction( 12917 Intrinsic::getName(Intrinsic::experimental_guard)); 12918 HasGuards = GuardDecl && !GuardDecl->use_empty(); 12919 } 12920 12921 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) 12922 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), 12923 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)), 12924 ValueExprMap(std::move(Arg.ValueExprMap)), 12925 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)), 12926 PendingPhiRanges(std::move(Arg.PendingPhiRanges)), 12927 PendingMerges(std::move(Arg.PendingMerges)), 12928 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)), 12929 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), 12930 PredicatedBackedgeTakenCounts( 12931 std::move(Arg.PredicatedBackedgeTakenCounts)), 12932 BECountUsers(std::move(Arg.BECountUsers)), 12933 ConstantEvolutionLoopExitValue( 12934 std::move(Arg.ConstantEvolutionLoopExitValue)), 12935 ValuesAtScopes(std::move(Arg.ValuesAtScopes)), 12936 ValuesAtScopesUsers(std::move(Arg.ValuesAtScopesUsers)), 12937 LoopDispositions(std::move(Arg.LoopDispositions)), 12938 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)), 12939 BlockDispositions(std::move(Arg.BlockDispositions)), 12940 SCEVUsers(std::move(Arg.SCEVUsers)), 12941 UnsignedRanges(std::move(Arg.UnsignedRanges)), 12942 SignedRanges(std::move(Arg.SignedRanges)), 12943 UniqueSCEVs(std::move(Arg.UniqueSCEVs)), 12944 UniquePreds(std::move(Arg.UniquePreds)), 12945 SCEVAllocator(std::move(Arg.SCEVAllocator)), 12946 LoopUsers(std::move(Arg.LoopUsers)), 12947 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)), 12948 FirstUnknown(Arg.FirstUnknown) { 12949 Arg.FirstUnknown = nullptr; 12950 } 12951 12952 ScalarEvolution::~ScalarEvolution() { 12953 // Iterate through all the SCEVUnknown instances and call their 12954 // destructors, so that they release their references to their values. 
12955 for (SCEVUnknown *U = FirstUnknown; U;) { 12956 SCEVUnknown *Tmp = U; 12957 U = U->Next; 12958 Tmp->~SCEVUnknown(); 12959 } 12960 FirstUnknown = nullptr; 12961 12962 ExprValueMap.clear(); 12963 ValueExprMap.clear(); 12964 HasRecMap.clear(); 12965 BackedgeTakenCounts.clear(); 12966 PredicatedBackedgeTakenCounts.clear(); 12967 12968 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); 12969 assert(PendingPhiRanges.empty() && "getRangeRef garbage"); 12970 assert(PendingMerges.empty() && "isImpliedViaMerge garbage"); 12971 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); 12972 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!"); 12973 } 12974 12975 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 12976 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 12977 } 12978 12979 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 12980 const Loop *L) { 12981 // Print all inner loops first 12982 for (Loop *I : *L) 12983 PrintLoopInfo(OS, SE, I); 12984 12985 OS << "Loop "; 12986 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 12987 OS << ": "; 12988 12989 SmallVector<BasicBlock *, 8> ExitingBlocks; 12990 L->getExitingBlocks(ExitingBlocks); 12991 if (ExitingBlocks.size() != 1) 12992 OS << "<multiple exits> "; 12993 12994 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 12995 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n"; 12996 else 12997 OS << "Unpredictable backedge-taken count.\n"; 12998 12999 if (ExitingBlocks.size() > 1) 13000 for (BasicBlock *ExitingBlock : ExitingBlocks) { 13001 OS << " exit count for " << ExitingBlock->getName() << ": " 13002 << *SE->getExitCount(L, ExitingBlock) << "\n"; 13003 } 13004 13005 OS << "Loop "; 13006 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13007 OS << ": "; 13008 13009 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) { 13010 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L); 13011 if (SE->isBackedgeTakenCountMaxOrZero(L)) 13012 OS << ", actual taken count either this or zero."; 13013 } else { 13014 OS << "Unpredictable max backedge-taken count. "; 13015 } 13016 13017 OS << "\n" 13018 "Loop "; 13019 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13020 OS << ": "; 13021 13022 SmallVector<const SCEVPredicate *, 4> Preds; 13023 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Preds); 13024 if (!isa<SCEVCouldNotCompute>(PBT)) { 13025 OS << "Predicated backedge-taken count is " << *PBT << "\n"; 13026 OS << " Predicates:\n"; 13027 for (auto *P : Preds) 13028 P->print(OS, 4); 13029 } else { 13030 OS << "Unpredictable predicated backedge-taken count. 
"; 13031 } 13032 OS << "\n"; 13033 13034 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 13035 OS << "Loop "; 13036 L->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13037 OS << ": "; 13038 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n"; 13039 } 13040 } 13041 13042 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) { 13043 switch (LD) { 13044 case ScalarEvolution::LoopVariant: 13045 return "Variant"; 13046 case ScalarEvolution::LoopInvariant: 13047 return "Invariant"; 13048 case ScalarEvolution::LoopComputable: 13049 return "Computable"; 13050 } 13051 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!"); 13052 } 13053 13054 void ScalarEvolution::print(raw_ostream &OS) const { 13055 // ScalarEvolution's implementation of the print method is to print 13056 // out SCEV values of all instructions that are interesting. Doing 13057 // this potentially causes it to create new SCEV objects though, 13058 // which technically conflicts with the const qualifier. This isn't 13059 // observable from outside the class though, so casting away the 13060 // const isn't dangerous. 13061 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 13062 13063 if (ClassifyExpressions) { 13064 OS << "Classifying expressions for: "; 13065 F.printAsOperand(OS, /*PrintType=*/false); 13066 OS << "\n"; 13067 for (Instruction &I : instructions(F)) 13068 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) { 13069 OS << I << '\n'; 13070 OS << " --> "; 13071 const SCEV *SV = SE.getSCEV(&I); 13072 SV->print(OS); 13073 if (!isa<SCEVCouldNotCompute>(SV)) { 13074 OS << " U: "; 13075 SE.getUnsignedRange(SV).print(OS); 13076 OS << " S: "; 13077 SE.getSignedRange(SV).print(OS); 13078 } 13079 13080 const Loop *L = LI.getLoopFor(I.getParent()); 13081 13082 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 13083 if (AtUse != SV) { 13084 OS << " --> "; 13085 AtUse->print(OS); 13086 if (!isa<SCEVCouldNotCompute>(AtUse)) { 13087 OS << " U: "; 13088 SE.getUnsignedRange(AtUse).print(OS); 13089 OS << " S: "; 13090 SE.getSignedRange(AtUse).print(OS); 13091 } 13092 } 13093 13094 if (L) { 13095 OS << "\t\t" "Exits: "; 13096 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 13097 if (!SE.isLoopInvariant(ExitValue, L)) { 13098 OS << "<<Unknown>>"; 13099 } else { 13100 OS << *ExitValue; 13101 } 13102 13103 bool First = true; 13104 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) { 13105 if (First) { 13106 OS << "\t\t" "LoopDispositions: { "; 13107 First = false; 13108 } else { 13109 OS << ", "; 13110 } 13111 13112 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13113 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter)); 13114 } 13115 13116 for (auto *InnerL : depth_first(L)) { 13117 if (InnerL == L) 13118 continue; 13119 if (First) { 13120 OS << "\t\t" "LoopDispositions: { "; 13121 First = false; 13122 } else { 13123 OS << ", "; 13124 } 13125 13126 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false); 13127 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL)); 13128 } 13129 13130 OS << " }"; 13131 } 13132 13133 OS << "\n"; 13134 } 13135 } 13136 13137 OS << "Determining loop execution counts for: "; 13138 F.printAsOperand(OS, /*PrintType=*/false); 13139 OS << "\n"; 13140 for (Loop *I : LI) 13141 PrintLoopInfo(OS, &SE, I); 13142 } 13143 13144 ScalarEvolution::LoopDisposition 13145 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) { 13146 auto &Values = LoopDispositions[S]; 13147 for (auto 
&V : Values) { 13148 if (V.getPointer() == L) 13149 return V.getInt(); 13150 } 13151 Values.emplace_back(L, LoopVariant); 13152 LoopDisposition D = computeLoopDisposition(S, L); 13153 auto &Values2 = LoopDispositions[S]; 13154 for (auto &V : llvm::reverse(Values2)) { 13155 if (V.getPointer() == L) { 13156 V.setInt(D); 13157 break; 13158 } 13159 } 13160 return D; 13161 } 13162 13163 ScalarEvolution::LoopDisposition 13164 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) { 13165 switch (S->getSCEVType()) { 13166 case scConstant: 13167 return LoopInvariant; 13168 case scPtrToInt: 13169 case scTruncate: 13170 case scZeroExtend: 13171 case scSignExtend: 13172 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L); 13173 case scAddRecExpr: { 13174 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13175 13176 // If L is the addrec's loop, it's computable. 13177 if (AR->getLoop() == L) 13178 return LoopComputable; 13179 13180 // Add recurrences are never invariant in the function-body (null loop). 13181 if (!L) 13182 return LoopVariant; 13183 13184 // Everything that is not defined at loop entry is variant. 13185 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader())) 13186 return LoopVariant; 13187 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not" 13188 " dominate the contained loop's header?"); 13189 13190 // This recurrence is invariant w.r.t. L if AR's loop contains L. 13191 if (AR->getLoop()->contains(L)) 13192 return LoopInvariant; 13193 13194 // This recurrence is variant w.r.t. L if any of its operands 13195 // are variant. 13196 for (auto *Op : AR->operands()) 13197 if (!isLoopInvariant(Op, L)) 13198 return LoopVariant; 13199 13200 // Otherwise it's loop-invariant. 13201 return LoopInvariant; 13202 } 13203 case scAddExpr: 13204 case scMulExpr: 13205 case scUMaxExpr: 13206 case scSMaxExpr: 13207 case scUMinExpr: 13208 case scSMinExpr: 13209 case scSequentialUMinExpr: { 13210 bool HasVarying = false; 13211 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) { 13212 LoopDisposition D = getLoopDisposition(Op, L); 13213 if (D == LoopVariant) 13214 return LoopVariant; 13215 if (D == LoopComputable) 13216 HasVarying = true; 13217 } 13218 return HasVarying ? LoopComputable : LoopInvariant; 13219 } 13220 case scUDivExpr: { 13221 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 13222 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L); 13223 if (LD == LoopVariant) 13224 return LoopVariant; 13225 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L); 13226 if (RD == LoopVariant) 13227 return LoopVariant; 13228 return (LD == LoopInvariant && RD == LoopInvariant) ? 13229 LoopInvariant : LoopComputable; 13230 } 13231 case scUnknown: 13232 // All non-instruction values are loop invariant. All instructions are loop 13233 // invariant if they are not contained in the specified loop. 13234 // Instructions are never considered invariant in the function body 13235 // (null loop) because they are defined within the "loop". 13236 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) 13237 return (L && !L->contains(I)) ? 
LoopInvariant : LoopVariant; 13238 return LoopInvariant; 13239 case scCouldNotCompute: 13240 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 13241 } 13242 llvm_unreachable("Unknown SCEV kind!"); 13243 } 13244 13245 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) { 13246 return getLoopDisposition(S, L) == LoopInvariant; 13247 } 13248 13249 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) { 13250 return getLoopDisposition(S, L) == LoopComputable; 13251 } 13252 13253 ScalarEvolution::BlockDisposition 13254 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13255 auto &Values = BlockDispositions[S]; 13256 for (auto &V : Values) { 13257 if (V.getPointer() == BB) 13258 return V.getInt(); 13259 } 13260 Values.emplace_back(BB, DoesNotDominateBlock); 13261 BlockDisposition D = computeBlockDisposition(S, BB); 13262 auto &Values2 = BlockDispositions[S]; 13263 for (auto &V : llvm::reverse(Values2)) { 13264 if (V.getPointer() == BB) { 13265 V.setInt(D); 13266 break; 13267 } 13268 } 13269 return D; 13270 } 13271 13272 ScalarEvolution::BlockDisposition 13273 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { 13274 switch (S->getSCEVType()) { 13275 case scConstant: 13276 return ProperlyDominatesBlock; 13277 case scPtrToInt: 13278 case scTruncate: 13279 case scZeroExtend: 13280 case scSignExtend: 13281 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB); 13282 case scAddRecExpr: { 13283 // This uses a "dominates" query instead of "properly dominates" query 13284 // to test for proper dominance too, because the instruction which 13285 // produces the addrec's value is a PHI, and a PHI effectively properly 13286 // dominates its entire containing block. 13287 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S); 13288 if (!DT.dominates(AR->getLoop()->getHeader(), BB)) 13289 return DoesNotDominateBlock; 13290 13291 // Fall through into SCEVNAryExpr handling. 13292 LLVM_FALLTHROUGH; 13293 } 13294 case scAddExpr: 13295 case scMulExpr: 13296 case scUMaxExpr: 13297 case scSMaxExpr: 13298 case scUMinExpr: 13299 case scSMinExpr: 13300 case scSequentialUMinExpr: { 13301 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S); 13302 bool Proper = true; 13303 for (const SCEV *NAryOp : NAry->operands()) { 13304 BlockDisposition D = getBlockDisposition(NAryOp, BB); 13305 if (D == DoesNotDominateBlock) 13306 return DoesNotDominateBlock; 13307 if (D == DominatesBlock) 13308 Proper = false; 13309 } 13310 return Proper ? ProperlyDominatesBlock : DominatesBlock; 13311 } 13312 case scUDivExpr: { 13313 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S); 13314 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS(); 13315 BlockDisposition LD = getBlockDisposition(LHS, BB); 13316 if (LD == DoesNotDominateBlock) 13317 return DoesNotDominateBlock; 13318 BlockDisposition RD = getBlockDisposition(RHS, BB); 13319 if (RD == DoesNotDominateBlock) 13320 return DoesNotDominateBlock; 13321 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ? 
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
  case scSequentialUMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

void ScalarEvolution::forgetBackedgeTakenCounts(const Loop *L,
                                                bool Predicated) {
  auto &BECounts =
      Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
  auto It = BECounts.find(L);
  if (It != BECounts.end()) {
    for (const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
      if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
        auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
        assert(UserIt != BECountUsers.end());
        UserIt->second.erase({L, Predicated});
      }
    }
    BECounts.erase(It);
  }
}

void ScalarEvolution::forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs) {
  SmallPtrSet<const SCEV *, 8> ToForget(SCEVs.begin(), SCEVs.end());
  SmallVector<const SCEV *, 8> Worklist(ToForget.begin(), ToForget.end());

  while (!Worklist.empty()) {
    const SCEV *Curr = Worklist.pop_back_val();
    auto Users = SCEVUsers.find(Curr);
    if (Users != SCEVUsers.end())
      for (auto *User : Users->second)
        if (ToForget.insert(User).second)
          Worklist.push_back(User);
  }

  for (auto *S : ToForget)
    forgetMemoizedResultsImpl(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (ToForget.count(Entry.first))
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }
}
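// For example (illustrative): if %add has the SCEV (1 + %a) and %a is
// forgotten, the worklist above transitively pulls in (1 + %a) via SCEVUsers,
// so every cached property derived from %a is dropped as well.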
void ScalarEvolution::forgetMemoizedResultsImpl(const SCEV *S) {
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  auto ExprIt = ExprValueMap.find(S);
  if (ExprIt != ExprValueMap.end()) {
    for (Value *V : ExprIt->second) {
      auto ValueIt = ValueExprMap.find_as(V);
      if (ValueIt != ValueExprMap.end())
        ValueExprMap.erase(ValueIt);
    }
    ExprValueMap.erase(ExprIt);
  }

  auto ScopeIt = ValuesAtScopes.find(S);
  if (ScopeIt != ValuesAtScopes.end()) {
    for (const auto &Pair : ScopeIt->second)
      if (!isa_and_nonnull<SCEVConstant>(Pair.second))
        erase_value(ValuesAtScopesUsers[Pair.second],
                    std::make_pair(Pair.first, S));
    ValuesAtScopes.erase(ScopeIt);
  }

  auto ScopeUserIt = ValuesAtScopesUsers.find(S);
  if (ScopeUserIt != ValuesAtScopesUsers.end()) {
    for (const auto &Pair : ScopeUserIt->second)
      erase_value(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
    ValuesAtScopesUsers.erase(ScopeUserIt);
  }

  auto BEUsersIt = BECountUsers.find(S);
  if (BEUsersIt != BECountUsers.end()) {
    // Work on a copy, as forgetBackedgeTakenCounts() will modify the original.
    auto Copy = BEUsersIt->second;
    for (const auto &Pair : Copy)
      forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
    BECountUsers.erase(BEUsersIt);
  }
}

void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}

void ScalarEvolution::getReachableBlocks(
    SmallPtrSetImpl<BasicBlock *> &Reachable, Function &F) {
  SmallVector<BasicBlock *> Worklist;
  Worklist.push_back(&F.getEntryBlock());
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();
    if (!Reachable.insert(BB).second)
      continue;

    Value *Cond;
    BasicBlock *TrueBB, *FalseBB;
    if (match(BB->getTerminator(), m_Br(m_Value(Cond), m_BasicBlock(TrueBB),
                                        m_BasicBlock(FalseBB)))) {
      if (auto *C = dyn_cast<ConstantInt>(Cond)) {
        Worklist.push_back(C->isOne() ? TrueBB : FalseBB);
        continue;
      }

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        const SCEV *L = getSCEV(Cmp->getOperand(0));
        const SCEV *R = getSCEV(Cmp->getOperand(1));
        if (isKnownPredicateViaConstantRanges(Cmp->getPredicate(), L, R)) {
          Worklist.push_back(TrueBB);
          continue;
        }
        if (isKnownPredicateViaConstantRanges(Cmp->getInversePredicate(), L,
                                              R)) {
          Worklist.push_back(FalseBB);
          continue;
        }
      }
    }

    append_range(Worklist, successors(BB));
  }
}
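// For example (illustrative): for `br i1 true, label %a, label %b` only %a is
// enqueued, and for a branch on `icmp ult i32 %x, 8` where %x is known via
// constant ranges to be less than 8, only the true successor is treated as
// reachable, so blocks that are dead by SCEV's reasoning are skipped during
// verification.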
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);
  SmallPtrSet<BasicBlock *, 16> ReachableBlocks;
  SE2.getReachableBlocks(ReachableBlocks, F);

  auto GetDelta = [&](const SCEV *Old, const SCEV *New) -> const SCEV * {
    if (containsUndefs(Old) || containsUndefs(New)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes a value go from "undef"
      // to "undef+1" (say). The transform is fine, since in both cases the
      // result is "undef", but SCEV thinks the value increased by 1.
      return nullptr;
    }

    // Unless VerifySCEVStrict is set, we only compare constant deltas.
    const SCEV *Delta = SE2.getMinusSCEV(Old, New);
    if (!VerifySCEVStrict && !isa<SCEVConstant>(Delta))
      return nullptr;

    return Delta;
  };

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    llvm::append_range(LoopStack, *L);

    // Only verify BECounts in reachable loops. For an unreachable loop,
    // any BECount is legal.
    if (!ReachableBlocks.contains(L->getHeader()))
      continue;

    // Only verify cached BECounts. Computing new BECounts may change the
    // results of subsequent SCEV uses.
    auto It = BackedgeTakenCounts.find(L);
    if (It == BackedgeTakenCounts.end())
      continue;

    auto *CurBECount =
        SCM.visit(It->second.getExact(L, const_cast<ScalarEvolution *>(this)));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could-not-compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    const SCEV *Delta = GetDelta(CurBECount, NewBECount);
    if (Delta && !Delta->isZero()) {
      dbgs() << "Trip Count for " << *L << " Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *Delta << "\n";
      std::abort();
    }
  }

  // Collect all valid loops currently in LoopInfo.
  SmallPtrSet<Loop *, 32> ValidLoops;
  SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    if (ValidLoops.insert(L).second)
      Worklist.append(L->begin(), L->end());
  }
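  // For example (illustrative): if a pass deleted a loop but left a stale
  // {0,+,1}<%deleted.loop> in ValueExprMap, the assertion in the next loop
  // would fire, since that addrec references a loop no longer in LoopInfo.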
  for (auto &KV : ValueExprMap) {
#ifndef NDEBUG
    // Check for SCEV expressions referencing invalid/deleted loops.
    if (auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
      assert(ValidLoops.contains(AR->getLoop()) &&
             "AddRec references invalid loop");
    }
#endif

    // Check that the value is also part of the reverse map.
    auto It = ExprValueMap.find(KV.second);
    if (It == ExprValueMap.end() || !It->second.contains(KV.first)) {
      dbgs() << "Value " << *KV.first
             << " is in ValueExprMap but not in ExprValueMap\n";
      std::abort();
    }

    if (auto *I = dyn_cast<Instruction>(&*KV.first)) {
      if (!ReachableBlocks.contains(I->getParent()))
        continue;
      const SCEV *OldSCEV = SCM.visit(KV.second);
      const SCEV *NewSCEV = SE2.getSCEV(I);
      const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
      if (Delta && !Delta->isZero()) {
        dbgs() << "SCEV for value " << *I << " changed!\n"
               << "Old: " << *OldSCEV << "\n"
               << "New: " << *NewSCEV << "\n"
               << "Delta: " << *Delta << "\n";
        std::abort();
      }
    }
  }

  for (const auto &KV : ExprValueMap) {
    for (Value *V : KV.second) {
      auto It = ValueExprMap.find_as(V);
      if (It == ValueExprMap.end()) {
        dbgs() << "Value " << *V
               << " is in ExprValueMap but not in ValueExprMap\n";
        std::abort();
      }
      if (It->second != KV.first) {
        dbgs() << "Value " << *V << " mapped to " << *It->second
               << " rather than " << *KV.first << "\n";
        std::abort();
      }
    }
  }

  // Verify integrity of SCEV users.
  for (const auto &S : UniqueSCEVs) {
    SmallVector<const SCEV *, 4> Ops;
    collectUniqueOps(&S, Ops);
    for (const auto *Op : Ops) {
      // We do not store dependencies of constants.
      if (isa<SCEVConstant>(Op))
        continue;
      auto It = SCEVUsers.find(Op);
      if (It != SCEVUsers.end() && It->second.count(&S))
        continue;
      dbgs() << "Use of operand " << *Op << " by user " << S
             << " is not being tracked!\n";
      std::abort();
    }
  }

  // Verify integrity of ValuesAtScopes users.
  for (const auto &ValueAndVec : ValuesAtScopes) {
    const SCEV *Value = ValueAndVec.first;
    for (const auto &LoopAndValueAtScope : ValueAndVec.second) {
      const Loop *L = LoopAndValueAtScope.first;
      const SCEV *ValueAtScope = LoopAndValueAtScope.second;
      if (!isa<SCEVConstant>(ValueAtScope)) {
        auto It = ValuesAtScopesUsers.find(ValueAtScope);
        if (It != ValuesAtScopesUsers.end() &&
            is_contained(It->second, std::make_pair(L, Value)))
          continue;
        dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
               << *ValueAtScope << " missing in ValuesAtScopesUsers\n";
        std::abort();
      }
    }
  }

  for (const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
    const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
    for (const auto &LoopAndValue : ValueAtScopeAndVec.second) {
      const Loop *L = LoopAndValue.first;
      const SCEV *Value = LoopAndValue.second;
      assert(!isa<SCEVConstant>(Value));
      auto It = ValuesAtScopes.find(Value);
      if (It != ValuesAtScopes.end() &&
          is_contained(It->second, std::make_pair(L, ValueAtScope)))
        continue;
      dbgs() << "Value: " << *Value << ", Loop: " << *L << ", ValueAtScope: "
             << *ValueAtScope << " missing in ValuesAtScopes\n";
      std::abort();
    }
  }
  // Verify integrity of BECountUsers.
  auto VerifyBECountUsers = [&](bool Predicated) {
    auto &BECounts =
        Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
    for (const auto &LoopAndBEInfo : BECounts) {
      for (const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
        if (!isa<SCEVConstant>(ENT.ExactNotTaken)) {
          auto UserIt = BECountUsers.find(ENT.ExactNotTaken);
          if (UserIt != BECountUsers.end() &&
              UserIt->second.contains({LoopAndBEInfo.first, Predicated}))
            continue;
          dbgs() << "Value " << *ENT.ExactNotTaken << " for loop "
                 << *LoopAndBEInfo.first << " missing from BECountUsers\n";
          std::abort();
        }
      }
    }
  };
  VerifyBECountUsers(/* Predicated */ false);
  VerifyBECountUsers(/* Predicated */ true);
}

bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}

AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).verify();
  return PreservedAnalyses::all();
}
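// These passes are typically exercised from the opt command line under the new
// pass manager; e.g. (illustrative, the exact pass names are defined in the
// pass registry):
//   opt -passes='print<scalar-evolution>' -disable-output input.ll
//   opt -passes='verify<scalar-evolution>' -disable-output input.ll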
PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  // For compatibility with opt's -analyze feature under legacy pass manager
  // which was not ported to NPM. This keeps tests using
  // update_analyze_test_checks.py working.
  OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
     << F.getName() << "':\n";
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  return getComparePredicate(ICmpInst::ICMP_EQ, LHS, RHS);
}

const SCEVPredicate *
ScalarEvolution::getComparePredicate(const ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Compare);
  ID.AddInteger(Pred);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVComparePredicate *Eq = new (SCEVAllocator)
      SCEVComparePredicate(ID.Intern(SCEVAllocator), Pred, LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}

const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}
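// For example (illustrative): for an i8 induction with no nowrap facts,
// SCEVPredicateRewriter below can turn (zext i8 {%start,+,%step}<%L> to i16)
// into {(zext %start to i16),+,(sext %step to i16)}<%L>, valid only under an
// IncrementNUSW wrap predicate on the original addrec that must then be
// checked at runtime.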
namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:

  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             const SCEVPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
        for (auto *Pred : U->getPredicates())
          if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
            if (IPred->getLHS() == Expr &&
                IPred->getPredicate() == ICmpInst::ICMP_EQ)
              return IPred->getRHS();
      } else if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
        if (IPred->getLHS() == Expr &&
            IPred->getPredicate() == ICmpInst::ICMP_EQ)
          return IPred->getRHS();
      }
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        const SCEVPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        if (L != WP->getExpr()->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  const SCEVPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *
ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                       const SCEVPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
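// For example (illustrative): given an induction PHI whose SCEV is only an
// opaque %phi unknown, convertSCEVToAddRecWithPredicates can still return
// {%start,+,%step}<%L>, recording the wrap predicates that make the rewrite
// sound; callers such as the loop vectorizer then emit the corresponding
// runtime checks.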
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVComparePredicate::SCEVComparePredicate(const FoldingSetNodeIDRef ID,
                                           const ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Compare), Pred(Pred), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVComparePredicate>(N);

  if (!Op)
    return false;

  if (Pred != ICmpInst::ICMP_EQ)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVComparePredicate::isAlwaysTrue() const { return false; }

void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
  if (Pred == ICmpInst::ICMP_EQ)
    OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
  else
    OS.indent(Depth) << "Compare predicate: " << *LHS << " "
                     << CmpInst::getPredicateName(Pred) << " " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is non-negative, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
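// For example (illustrative): {x,+,1}<nsw> statically implies IncrementNSSW,
// and {x,+,1}<nuw> with a constant non-negative step implies IncrementNUSW;
// PredicatedScalarEvolution::setNoOverflow (below) clears such statically
// implied bits so no runtime check is emitted for them.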
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds)
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {
  for (auto *P : Preds)
    add(P);
}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  return any_of(Preds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {
  SmallVector<const SCEVPredicate *, 4> Empty;
  Preds = std::make_unique<SCEVUnionPredicate>(Empty);
}

void ScalarEvolution::registerUser(const SCEV *User,
                                   ArrayRef<const SCEV *> Ops) {
  for (auto *Op : Ops)
    // We do not expect that forgetting cached data for SCEVConstants will ever
    // open any prospects for sharpening or introduce any correctness issues,
    // so we don't bother storing their dependencies.
    if (!isa<SCEVConstant>(Op))
      SCEVUsers[Op].insert(User);
}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, *Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SmallVector<const SCEVPredicate *, 4> Preds;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, Preds);
    for (auto *P : Preds)
      addPredicate(*P);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds->implies(&Pred))
    return;

  auto &OldPreds = Preds->getPredicates();
  SmallVector<const SCEVPredicate *, 4> NewPreds(OldPreds.begin(),
                                                 OldPreds.end());
  NewPreds.push_back(&Pred);
  Preds = std::make_unique<SCEVUnionPredicate>(NewPreds);
  updateGeneration();
}

const SCEVPredicate &PredicatedScalarEvolution::getPredicate() const {
  return *Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, *Preds)};
    }
  }
}

void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    addPredicate(*P);

  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L),
      Preds(std::make_unique<SCEVUnionPredicate>(Init.Preds->getPredicates())),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
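// For example (illustrative): matchURem recognizes both spellings of an
// unsigned remainder. For a power-of-2 divisor, `%a urem 8` may reach SCEV as
// (zext (trunc %a to i3) to i32), matched with RHS = 8; the general case
// `%a urem %b` appears as (%a + (-1 * (%a /u %b) * %b)), matched with
// LHS = %a and RHS = %b.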
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B is
// 4, A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // For now, bail out if the type of the LHS is larger than the type of
      // the expression.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}
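// For example (illustrative): for a loop with two exits, one with exact count
// %n and another with no exact count but a constant maximum of 100, the
// symbolic maximum backedge-taken count computed above is umin(%n, 100).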
/// A rewriter to replace SCEV expressions in Map with the corresponding entry
/// in the map. It skips AddRecExpr because we cannot guarantee that the
/// replacement is loop invariant in the loop of the AddRec.
///
/// At the moment only rewriting SCEVUnknown and SCEVZeroExtendExpr is
/// supported.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  const DenseMap<const SCEV *, const SCEV *> &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE,
                        DenseMap<const SCEV *, const SCEV *> &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return Expr;
    return I->second;
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    auto I = Map.find(Expr);
    if (I == Map.end())
      return SCEVRewriteVisitor<SCEVLoopGuardRewriter>::visitZeroExtendExpr(
          Expr);
    return I->second;
  }
};

const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  SmallVector<const SCEV *> ExprsToRewrite;
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS,
                              DenseMap<const SCEV *, const SCEV *>
                                  &RewriteMap) {
    // WARNING: It is generally unsound to apply any wrap flags to the proposed
    // replacement SCEV which isn't directly implied by the structure of that
    // SCEV. In particular, using contextual facts to imply flags is *NOT*
    // legal. See the scoping rules for flags in the header to understand why.

    // If LHS is a constant, apply information to the other expression.
    if (isa<SCEVConstant>(LHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X >=u C1).
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap,
                                 &ExprsToRewrite]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown);
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHSUnknown;
      RewriteMap[LHSUnknown] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      ExprsToRewrite.push_back(LHSUnknown);
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // If we have LHS == 0, check if LHS is computing a property of some
    // unknown SCEV %v which we can rewrite %v to express explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          auto Multiple = getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS);
          RewriteMap[LHSUnknown] = Multiple;
          ExprsToRewrite.push_back(LHSUnknown);
          return;
        }
      }
    }

    // Do not apply information for constants or if RHS contains an AddRec.
    if (isa<SCEVConstant>(LHS) || containsAddRecurrence(RHS))
      return;

    // If RHS is SCEVUnknown, make sure the information is applied to it.
    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Limit to expressions that can be rewritten.
    if (!isa<SCEVUnknown>(LHS) && !isa<SCEVZeroExtendExpr>(LHS))
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHS);
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;

    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }

    if (RewrittenRHS) {
      RewriteMap[LHS] = RewrittenRHS;
      if (LHS == RewrittenLHS)
        ExprsToRewrite.push_back(LHS);
    }
  };
  // First, collect conditions from dominating branches. Starting at the loop
  // predecessor, climb up the predecessor chain, as long as there are
  // predecessors that can be found that have unique successors leading to the
  // original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
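  // For example (illustrative): if the loop is guarded by `if (%n u< 8)`,
  // CollectCondition records %n -> umin(%n, 7), and applyLoopGuards(%n, L)
  // returns umin(%n, 7), tightening any expression built from %n.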
  SmallVector<std::pair<Value *, bool>> Terms;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    Terms.emplace_back(LoopEntryPredicate->getCondition(),
                       LoopEntryPredicate->getSuccessor(0) == Pair.second);
  }

  // Now apply the information from the collected conditions to RewriteMap.
  // Conditions are processed in reverse order, so the earliest conditions are
  // processed first. This ensures the SCEVs with the shortest dependency
  // chains are constructed first.
  DenseMap<const SCEV *, const SCEV *> RewriteMap;
  for (auto &E : reverse(Terms)) {
    bool EnterIfTrue = E.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(E.first);
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        const auto *LHS = getSCEV(Cmp->getOperand(0));
        const auto *RHS = getSCEV(Cmp->getOperand(1));
        CollectCondition(Predicate, LHS, RHS, RewriteMap);
        continue;
      }

      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    const auto *LHS = getSCEV(Cmp->getOperand(0));
    const auto *RHS = getSCEV(Cmp->getOperand(1));
    CollectCondition(Cmp->getPredicate(), LHS, RHS, RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;

  // Now that all rewrite information is collected, rewrite the collected
  // expressions with the information in the map. This applies information to
  // sub-expressions.
  if (ExprsToRewrite.size() > 1) {
    for (const SCEV *Expr : ExprsToRewrite) {
      const SCEV *RewriteTo = RewriteMap[Expr];
      RewriteMap.erase(Expr);
      SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
      RewriteMap.insert({Expr, Rewriter.visit(RewriteTo)});
    }
  }

  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}